From a7783e307c6fb04a83612c601e21594535d8d218 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sun, 1 Feb 2026 21:22:28 +0000 Subject: [PATCH 01/42] ## Python SDK Changes: * `mistral.beta.conversations.restart_stream()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.start()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.list()`: `response.[].[agent_conversation].agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.get()`: `response.[agent_conversation].agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.restart()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.start_stream()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.agents.get()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.agents.get_version()`: `request.version` **Changed** **Breaking** :warning: * `mistral.beta.agents.list_version_aliases()`: **Added** * `mistral.models.list()`: `response.data.[].[fine-tuned].capabilities.audio_transcription` **Added** * `mistral.models.retrieve()`: `response.[base].capabilities.audio_transcription` **Added** * `mistral.beta.agents.create_version_alias()`: **Added** * `mistral.files.list()`: `request.mimetypes` **Added** --- .speakeasy/gen.lock | 261 +++++++++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- README.md | 4 +- RELEASES.md | 12 +- docs/models/agentaliasresponse.md | 11 + docs/models/agentconversation.md | 22 +- docs/models/agentconversationagentversion.md | 17 + ...tsapiv1agentscreateorupdatealiasrequest.md | 10 + docs/models/agentsapiv1agentsgetrequest.md | 8 +- .../agentsapiv1agentsgetversionrequest.md | 2 +- ...ntsapiv1agentslistversionaliasesrequest.md | 8 + docs/models/agentversion.md | 17 + docs/models/conversationrequest.md | 2 +- docs/models/conversationrestartrequest.md | 2 +- .../conversationrestartrequestagentversion.md | 19 + .../conversationrestartstreamrequest.md | 2 +- ...rsationrestartstreamrequestagentversion.md | 19 + docs/models/conversationstreamrequest.md | 2 +- .../conversationstreamrequestagentversion.md | 17 + docs/models/filesapirouteslistfilesrequest.md | 3 +- docs/models/message.md | 19 + docs/models/mistralpromptmode.md | 4 + docs/models/modelcapabilities.md | 23 +- docs/models/queryparamagentversion.md | 17 + docs/models/realtimetranscriptionerror.md | 9 + .../realtimetranscriptionerrordetail.md | 9 + docs/models/realtimetranscriptionsession.md | 10 + .../realtimetranscriptionsessioncreated.md | 9 + .../realtimetranscriptionsessionupdated.md | 9 + docs/sdks/conversations/README.md | 8 +- docs/sdks/files/README.md | 1 + docs/sdks/mistralagents/README.md | 104 ++++- src/mistralai/_version.py | 4 +- src/mistralai/conversations.py | 56 ++- src/mistralai/files.py | 6 + src/mistralai/mistral_agents.py | 399 +++++++++++++++++- src/mistralai/models/__init__.py | 103 +++++ src/mistralai/models/agentaliasresponse.py | 23 + src/mistralai/models/agentconversation.py | 18 +- ..._api_v1_agents_create_or_update_aliasop.py | 26 ++ .../agents_api_v1_agents_get_versionop.py | 4 +- .../models/agents_api_v1_agents_getop.py | 15 +- ...ts_api_v1_agents_list_version_aliasesop.py | 16 + src/mistralai/models/conversationrequest.py | 10 +- .../models/conversationrestartrequest.py | 22 +- .../conversationrestartstreamrequest.py | 24 +- .../models/conversationstreamrequest.py | 14 +- .../models/files_api_routes_list_filesop.py 
| 9 +- src/mistralai/models/mistralpromptmode.py | 4 + src/mistralai/models/modelcapabilities.py | 3 + .../models/realtimetranscriptionerror.py | 27 ++ .../realtimetranscriptionerrordetail.py | 29 ++ .../models/realtimetranscriptionsession.py | 20 + .../realtimetranscriptionsessioncreated.py | 30 ++ .../realtimetranscriptionsessionupdated.py | 30 ++ uv.lock | 2 +- 57 files changed, 1405 insertions(+), 163 deletions(-) create mode 100644 docs/models/agentaliasresponse.md create mode 100644 docs/models/agentconversationagentversion.md create mode 100644 docs/models/agentsapiv1agentscreateorupdatealiasrequest.md create mode 100644 docs/models/agentsapiv1agentslistversionaliasesrequest.md create mode 100644 docs/models/agentversion.md create mode 100644 docs/models/conversationrestartrequestagentversion.md create mode 100644 docs/models/conversationrestartstreamrequestagentversion.md create mode 100644 docs/models/conversationstreamrequestagentversion.md create mode 100644 docs/models/message.md create mode 100644 docs/models/queryparamagentversion.md create mode 100644 docs/models/realtimetranscriptionerror.md create mode 100644 docs/models/realtimetranscriptionerrordetail.md create mode 100644 docs/models/realtimetranscriptionsession.md create mode 100644 docs/models/realtimetranscriptionsessioncreated.md create mode 100644 docs/models/realtimetranscriptionsessionupdated.md create mode 100644 src/mistralai/models/agentaliasresponse.py create mode 100644 src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py create mode 100644 src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py create mode 100644 src/mistralai/models/realtimetranscriptionerror.py create mode 100644 src/mistralai/models/realtimetranscriptionerrordetail.py create mode 100644 src/mistralai/models/realtimetranscriptionsession.py create mode 100644 src/mistralai/models/realtimetranscriptionsessioncreated.py create mode 100644 src/mistralai/models/realtimetranscriptionsessionupdated.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index b89ea6b3..9d51b30a 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,19 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: a61cb56fd9942dd20124e0422444bac3 + docChecksum: cc385dce976ac06e6d062e992f0ee380 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 1.11.1 - configChecksum: 1a6d0af8e5d88c97b7e947763e633c3d + releaseVersion: 1.12.0 + configChecksum: 862d9a8667674972c091f9db84d42ba0 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 031e6fcc-162d-451f-a98c-f65bf3605643 - pristine_commit_hash: 08ac7141d4e4dffd4a3327da51bd2a70d50ff68f - pristine_tree_hash: aeb852eedd1ebeb4411a5c0f286d53884362af3b + generation_id: 8b0735b6-5924-48f1-ade2-47cb374c76bc + pristine_commit_hash: a9971b936f50486e2e4ceef95d0b2c4708633219 + pristine_tree_hash: 51b8a57de0bf62da607fe0023eec1124458ebee9 features: python: additionalDependencies: 1.0.0 @@ -63,10 +63,18 @@ trackedFiles: id: ffdbb4c53c87 last_write_checksum: sha1:ec6c799658040b3c75d6ae0572bb391c6aea3fd4 pristine_git_object: ee054dd349848eff144d7064c319c3c8434bdc6c + docs/models/agentaliasresponse.md: + id: 5ac4721d8947 + last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167 + pristine_git_object: 
aa531ec5d1464f95e3938f148c1e88efc30fa6a6 docs/models/agentconversation.md: id: 3590c1a566fa - last_write_checksum: sha1:a88c8e10a9de2bc99cabd38ab9fc775a2d33e9ef - pristine_git_object: 92fd673c0710889ae3f1d77f82c32113f39457b7 + last_write_checksum: sha1:264d78815c3999bac377ab3f8c08a264178baf43 + pristine_git_object: a2d617316f1965acfabf7d2fe74334de16213829 + docs/models/agentconversationagentversion.md: + id: 468e0d1614bb + last_write_checksum: sha1:6e60bf4a18d791d694e90c89bdb8cc38e43c324b + pristine_git_object: 668a8dc0f0c51a231a73aed51b2db13de243a038 docs/models/agentconversationobject.md: id: cfd35d9dd4f2 last_write_checksum: sha1:112552d4a241967cf0a7dcb981428e7e0715dc34 @@ -111,22 +119,30 @@ trackedFiles: id: ed24a6d647a0 last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d pristine_git_object: 70e143b030d3041c7538ecdacb8f5f9f8d1b5c92 + docs/models/agentsapiv1agentscreateorupdatealiasrequest.md: + id: c09ec9946094 + last_write_checksum: sha1:0883217b4bad21f5d4f8162ca72005bf9105a93f + pristine_git_object: 79406434cc6ff3d1485089f35639d6284f66d6cb docs/models/agentsapiv1agentsdeleterequest.md: id: 0faaaa59add9 last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c docs/models/agentsapiv1agentsgetrequest.md: id: 01740ae62cff - last_write_checksum: sha1:0ed4bb58c94493e21826b38d33c2498de9150b98 - pristine_git_object: 825e03a02e14d03ce47022df840c118de8cd921f + last_write_checksum: sha1:9c4f6d88f29c39238757547da605ecb7106e76c2 + pristine_git_object: c71d4419afd3b51713e154b8021d4fe2b49d8af5 docs/models/agentsapiv1agentsgetversionrequest.md: id: 88ed22b85cde - last_write_checksum: sha1:c6706d79c9253829cf4285c99d49873fa56596bf - pristine_git_object: 7617d2748c86f537bf125d90e67f41df71c1e5cd + last_write_checksum: sha1:0ef23807c8efa2662144da66745045abdd2cb60a + pristine_git_object: 96a7358943a69e871a2bb7f0f30d6fe2bb8dff3d docs/models/agentsapiv1agentslistrequest.md: id: c2720c209527 last_write_checksum: sha1:cb599d1583ee9374d44695f5ee7efe79dbb8a503 pristine_git_object: 8cba13253d42a180b06eab8c10297ef362fb434d + docs/models/agentsapiv1agentslistversionaliasesrequest.md: + id: 69c8bce2c017 + last_write_checksum: sha1:4083fc80627b2cc04fd271df21393944730ef1ba + pristine_git_object: 3083bf92641404738948cd57306eac978b701551 docs/models/agentsapiv1agentslistversionsrequest.md: id: 0bc44ed8d6bb last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18 @@ -223,6 +239,10 @@ trackedFiles: id: a39223b88fc9 last_write_checksum: sha1:925ef5852c2031c9bf2608577e55edbc36708730 pristine_git_object: 1752ee6861d23c6abaa6b748f4ff43e9545505ec + docs/models/agentversion.md: + id: b0aa02d6c085 + last_write_checksum: sha1:f6fcf351de43eed5345f88f5cb6a2bf928a594d9 + pristine_git_object: fd4b6a3ea4ade6c9f62594b377c8e791a50211e7 docs/models/apiendpoint.md: id: be613fd9b947 last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 @@ -589,8 +609,8 @@ trackedFiles: pristine_git_object: db3a441bde0d086bccda4814ddfbf737539681a6 docs/models/conversationrequest.md: id: dd7f4d6807f2 - last_write_checksum: sha1:4ecca434753494ff0af66952655af92293690702 - pristine_git_object: 04378ae34c754f2ed67a34d14923c7b0d1605d4e + last_write_checksum: sha1:33dec32dbf20979ac04763e99a82e90ee474fef4 + pristine_git_object: 2b4ff8ef3398561d9b3e192a51ec22f64880389c docs/models/conversationresponse.md: id: 2eccf42d48af last_write_checksum: sha1:69059d02d5354897d23c9d9654d38a85c7e0afc6 @@ -601,24 +621,36 @@ trackedFiles: pristine_git_object: 
bea66e5277feca4358dd6447959ca945eff2171a docs/models/conversationrestartrequest.md: id: 558e9daa00bd - last_write_checksum: sha1:97c25a370411e1bce144c61272ca8f32066112be - pristine_git_object: f389a1e5c42cf0f73784d5563eaa6d0b29e0d69e + last_write_checksum: sha1:0e33f56f69313b9111b3394ecca693871d48acfa + pristine_git_object: d98653127fd522e35323b310d2342ccc08927962 + docs/models/conversationrestartrequestagentversion.md: + id: e6ea289c6b23 + last_write_checksum: sha1:a5abf95a81b7e080bd3cadf65c2db38ca458573f + pristine_git_object: 019ba301411729ec2c8078404adae998b3b9dacd docs/models/conversationrestartrequesthandoffexecution.md: id: faee86c7832c last_write_checksum: sha1:44728be55e96193e6f433e2f46f8f749f1671097 pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e docs/models/conversationrestartstreamrequest.md: id: 01b92ab1b56d - last_write_checksum: sha1:90f0ab9aba1919cbc2b9cfc8e5ec9d80f8f3910c - pristine_git_object: d7358dc20b2b60cb287b3c4a1c174a7883871a54 + last_write_checksum: sha1:aa3d30800417e04f741324d60529f3190ea9cd16 + pristine_git_object: a5f8cbe73ed1ce28c82d76f0e9f933bda64f733c + docs/models/conversationrestartstreamrequestagentversion.md: + id: 395265f34ff6 + last_write_checksum: sha1:ebf4e89a478ab40e1f8cd3f9a000e179426bda47 + pristine_git_object: 9e0063003f1d8acce61cf4edda91ddbc23a3c69d docs/models/conversationrestartstreamrequesthandoffexecution.md: id: 3e9c4a9ab94d last_write_checksum: sha1:300e197f11ad5efc654b51198b75049890258eef pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd docs/models/conversationstreamrequest.md: id: 833f266c4f96 - last_write_checksum: sha1:b7196c9194bc5167d35d09774a3f26bc7d543790 - pristine_git_object: e403db68e7932f60b1343d9282e2c110414486ce + last_write_checksum: sha1:8d7400dcdb9525c2e45bdaa495df6ca7dcf7f992 + pristine_git_object: 299346f8aaa8ccddcbf7fd083389b74346ef2d4f + docs/models/conversationstreamrequestagentversion.md: + id: e99ccc842929 + last_write_checksum: sha1:0ba5fca217681cdc5e08e0d82db67884bed076a6 + pristine_git_object: 52ee96720abbb3fec822d0792dbde7020f9fb189 docs/models/conversationstreamrequesthandoffexecution.md: id: e6701e5f9f0c last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50 @@ -745,8 +777,8 @@ trackedFiles: pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1 docs/models/filesapirouteslistfilesrequest.md: id: 04bdf7c654bd - last_write_checksum: sha1:258317fd5c0738cff883f31e13393ac64f817a6f - pristine_git_object: 3801a96e19f149a665bde4890e26df54d7f07d77 + last_write_checksum: sha1:0a99755150c2ded8e5d59a96527021d29326b980 + pristine_git_object: 57d11722f1dba2640df97c22be2a91317c240608 docs/models/filesapiroutesretrievefilerequest.md: id: 2783bfd9c4b9 last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab @@ -1151,6 +1183,10 @@ trackedFiles: id: b071d5a509cc last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/message.md: + id: a9614076792b + last_write_checksum: sha1:9199637b21212e630336d0d513c6b799732dee54 + pristine_git_object: 752f04a8b5ec3bedb0b5c3e4fbf3e5c3fccc07cd docs/models/messageentries.md: id: 9af3a27b862b last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc @@ -1225,12 +1261,12 @@ trackedFiles: pristine_git_object: 3c552bac2fa3a5a3783db994d47d255a94643110 docs/models/mistralpromptmode.md: id: d17d5db4d3b6 - last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 - pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + 
last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 docs/models/modelcapabilities.md: id: 283fbc5fa32f - last_write_checksum: sha1:69312b751771ae8ffa0d1452e3c6c545fdbf52b7 - pristine_git_object: 646c8e94fd208cbf01df19ad6c9707ad235bc59b + last_write_checksum: sha1:8a221e2334193907f84cf241ebaf6b86512bbd8b + pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca docs/models/modelconversation.md: id: 497521ee9bd6 last_write_checksum: sha1:bd11f51f1b6fedbf8a1e1973889d1961086c164f @@ -1319,10 +1355,34 @@ trackedFiles: id: 83c8c59c1802 last_write_checksum: sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c pristine_git_object: 7b67583f4209778ac6f945631c0ee03ba1f4c663 + docs/models/queryparamagentversion.md: + id: 49d942f63049 + last_write_checksum: sha1:42557c6bf0afc1eabde48c4b6122f801608d8f05 + pristine_git_object: 3eb5ef1840299139bf969379cbfc3ed49127f176 docs/models/queryparamstatus.md: id: 15628120923d last_write_checksum: sha1:36f1c9b6a6af6f27fbf0190417abf95b4a0bc1b9 pristine_git_object: dcd2090861b16f72b0fb321714b4143bc14b7566 + docs/models/realtimetranscriptionerror.md: + id: 4bc5e819565b + last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6 + pristine_git_object: e01f2126b3084eade47a26ea092556f7f61142c9 + docs/models/realtimetranscriptionerrordetail.md: + id: ea137b1051f1 + last_write_checksum: sha1:43ae02b32b473d8ba1aaa3b336a40f706d6338d0 + pristine_git_object: 96420ada2ac94fca24a36ddacae9c876e14ccb7a + docs/models/realtimetranscriptionsession.md: + id: aeb0a0f87d6f + last_write_checksum: sha1:c3aa4050d9cc1b73df8496760f1c723d16183f3a + pristine_git_object: 94a0a89e8ca03866f8b09202a28c4e0f7c3af2e6 + docs/models/realtimetranscriptionsessioncreated.md: + id: aa2ae26192d6 + last_write_checksum: sha1:d13fec916d05300c86b52e951e81b1ceee230634 + pristine_git_object: 34e603fd0a1cbc8007eef06decb158213faebeed + docs/models/realtimetranscriptionsessionupdated.md: + id: 56ce3ae7e208 + last_write_checksum: sha1:833db566b2c8a6839b43cb4e760f2af53a2d7f57 + pristine_git_object: 7e2719957aae390ee18b699e61fbc7581242942f docs/models/referencechunk.md: id: 07895f9debfd last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 @@ -1737,8 +1797,8 @@ trackedFiles: pristine_git_object: e76efb79d8b1353208b42619f4cc5b688ef5d561 docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:b4e49eadaf5a3bb50f5c3a88a759bc529db2584f - pristine_git_object: c488848cc4c18a098deae8f02c0d4a86d1d898db + last_write_checksum: sha1:06b7381c76c258e2a2dca3764456105929d98315 + pristine_git_object: ca383176a8b349cbaa757690b3f7a2cefe22cb1a docs/sdks/documents/README.md: id: 9758e88a0a9d last_write_checksum: sha1:84791e86c3b9c15f8fd16d2a3df6bd3685023a69 @@ -1749,8 +1809,8 @@ trackedFiles: pristine_git_object: 4390b7bd999a75a608f324f685b2284a8fa277ec docs/sdks/files/README.md: id: e576d7a117f0 - last_write_checksum: sha1:88cd213e513854b8beee72b8ea751f74bf32a845 - pristine_git_object: f0dfd59364c06e84d9cce517594a2912e2b724c8 + last_write_checksum: sha1:99d15a4acce49d5eca853b5a08fd81e76581dc52 + pristine_git_object: 57b53fc75208f4f6361636690b91564148448633 docs/sdks/fim/README.md: id: 499b227bf6ca last_write_checksum: sha1:824f7d1b58ff0b650367737c0e9b91a9d2d14a45 @@ -1765,8 +1825,8 @@ trackedFiles: pristine_git_object: e672c190ad6ac4623f99357d7e59d52f6722518f docs/sdks/mistralagents/README.md: id: 20b3478ad16d - last_write_checksum: sha1:73c444aaf6e547439dafb8d099142fd0059fdf4f - pristine_git_object: 
8021fa07d58f71765097d1b3cea7ac4a2d6224a1 + last_write_checksum: sha1:c4e73cd96136392d01b0ce2a57bf0854d05688c0 + pristine_git_object: bdd8d588d88f4929c3b33bcecd72bbb5fce7402d docs/sdks/mistraljobs/README.md: id: 71aafa44d228 last_write_checksum: sha1:255a4221b3b61ef247b39c9723a78408cda486d3 @@ -1809,8 +1869,8 @@ trackedFiles: pristine_git_object: 6d0f3e1166cb0271f89f5ba83441c88199d7a432 src/mistralai/_version.py: id: 37b53ba66d7f - last_write_checksum: sha1:c4d3183c7342cd3d37f1a2fb2a707b2cb76cafec - pristine_git_object: aae7598df33f9fc79d17c1cb19baf2b61539e9db + last_write_checksum: sha1:a4d76992b028e2d138e2f7f6d3087c2a606a21c7 + pristine_git_object: 6ee91593a9fbcd6c53eae810c1c2d0120f56262e src/mistralai/accesses.py: id: 98cb4addd052 last_write_checksum: sha1:5d9d495274d67b1343ba99d755c1c01c64c2ead1 @@ -1845,8 +1905,8 @@ trackedFiles: pristine_git_object: 7c32506ec03cc0fd88b786ff49d7690fd4283d2a src/mistralai/conversations.py: id: be58e57a6198 - last_write_checksum: sha1:76169b9954e645c9d7260b4d9e08be87de7ec643 - pristine_git_object: 93ed8c281a2f44e19f833309ec67b5f35cab1b53 + last_write_checksum: sha1:b9287bbe777a042b8258494cd5162d32e6a89c20 + pristine_git_object: 194cb4c0a629654b31bbcce8391baf48601d0eb7 src/mistralai/documents.py: id: 1945602083a8 last_write_checksum: sha1:14d1e6b5a95869d70a6fc89b07d5365c98aff5d7 @@ -1857,8 +1917,8 @@ trackedFiles: pristine_git_object: 7430f8042df4fec517288d0ddb0eb174e7e43a8e src/mistralai/files.py: id: 0e29db0e2269 - last_write_checksum: sha1:e4f833d390f1b3b682f073a76ffb6e29f89c55d1 - pristine_git_object: ab2c75a2f6774a99fe67ac5d3b0fa6544d093181 + last_write_checksum: sha1:d79d5b1785f441a46673a7efa108ddb98c44376a + pristine_git_object: 90ada0ff707521d59d329bebac74005eb68488d8 src/mistralai/fim.py: id: 71a865142baf last_write_checksum: sha1:7accf79c11a17fefbacde7f2b0f966f3716233df @@ -1881,24 +1941,28 @@ trackedFiles: pristine_git_object: 32648937feb79adf6155423cbe9bac4d7fe52224 src/mistralai/mistral_agents.py: id: 671c4985aaa1 - last_write_checksum: sha1:01d02e6ea96903bf0b9893d24115a154e078096d - pristine_git_object: e4abf6e4cba4cfedbe1d6bd93292318f641d49d0 + last_write_checksum: sha1:1fe4fb4f2828b532ac3ddf3b72e748a53d5099e9 + pristine_git_object: 7fb0ce259cb1c1a3847c567bdc992c176489add6 src/mistralai/mistral_jobs.py: id: 18065a449da0 last_write_checksum: sha1:fb205d962444f6aba163ecd3169c12489b3f0cc9 pristine_git_object: d1aeec8a014b22e44f4fe5e751206c3648e875af src/mistralai/models/__init__.py: id: 3228134f03e5 - last_write_checksum: sha1:0e6ec6d05cfd56d49d761a68e4f42f550695aa81 - pristine_git_object: c35b3d24abc3863d88e40b8d9e8bd2c1a35a4541 + last_write_checksum: sha1:cb1fb02e33b85bf82db7d6fd15b2cc3b109c5060 + pristine_git_object: 23e652220f29a882748661a8c0d21aa2830471bf src/mistralai/models/agent.py: id: ca4162a131b1 last_write_checksum: sha1:fe8a7c8c9c4ba59613d7d89f0c2e7a6958e25f85 pristine_git_object: eb30905b3de2b69ece35bdd40f390b2fa6ffc5a8 + src/mistralai/models/agentaliasresponse.py: + id: d329dd68429e + last_write_checksum: sha1:a3ebf39f159f7cd63dbabd9ff2c79df97e43e41f + pristine_git_object: c0928da9c65c588c515f3f1668ccfb69d3a23861 src/mistralai/models/agentconversation.py: id: bd3035451c40 - last_write_checksum: sha1:2e4a6a5ae0da2e9ccbb588c8487b48077d561d93 - pristine_git_object: 625fb4fc6697860060dfdeb449986d89efc232d6 + last_write_checksum: sha1:724a256f4914116500fd962df4b3cfc79ea75c43 + pristine_git_object: 6007b5715fd4a463d25a244b716effafbeecace6 src/mistralai/models/agentcreationrequest.py: id: 87f33bd9ea58 last_write_checksum: 
sha1:a6885376d36a5a17273d8d8d8d45e3d6c3ee1b9f @@ -1915,18 +1979,26 @@ trackedFiles: id: ce8e306fa522 last_write_checksum: sha1:2b5bac2f628c0e7cdd6df73404f69f5d405e576c pristine_git_object: 11bfa918903f8de96f98f722eaaf9a70b4fca8c1 + src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py: + id: dd0e03fda847 + last_write_checksum: sha1:a0dd39bb4b0af3a15b1aa8427a6f07d1826c04dc + pristine_git_object: 6cf9d0e0644ce0afd5f673f18fdda9dcccb5f04c src/mistralai/models/agents_api_v1_agents_deleteop.py: id: 588791d168a1 last_write_checksum: sha1:2dae37c3b9778d688663550b9803d52111577f3e pristine_git_object: 38e04953cc320f503a2f6e77096985da60896f2a src/mistralai/models/agents_api_v1_agents_get_versionop.py: id: bdb81ef0e35a - last_write_checksum: sha1:dab21f6fae05e2794208baf3b4e43feeeaf9b3bd - pristine_git_object: 4463d3b25aedad4f3b96a9fb7174a598c843939f + last_write_checksum: sha1:372da3794afd45d442d56edd3ec3cc4907f88223 + pristine_git_object: fddb10dde6707b6641b035e372270991d349f4f3 src/mistralai/models/agents_api_v1_agents_getop.py: id: 2358eceee519 - last_write_checksum: sha1:362d0c781b2c79d829f6e4901e558aaca937b105 - pristine_git_object: dced6dbb49c31fe2981cbd3865c0d580082a1ade + last_write_checksum: sha1:dca59474f75a6636ecac8265cab1bb51d36df56a + pristine_git_object: 2b7d89a5b34f3e768a18f9edbdf712fbcf5c20e4 + src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py: + id: 51215b825530 + last_write_checksum: sha1:d24f8eff3bd19414c0a04e474b33e1c63861a1da + pristine_git_object: 650a7187a3ac419069440fe040a166a036835b37 src/mistralai/models/agents_api_v1_agents_list_versionsop.py: id: 5f680df288a9 last_write_checksum: sha1:a236170f366d9701346b57f9ee4c788a9a2293e5 @@ -2197,24 +2269,24 @@ trackedFiles: pristine_git_object: 32ca9c20cb37ff65f7e9b126650a78a4b97e4b56 src/mistralai/models/conversationrequest.py: id: ceffcc288c2d - last_write_checksum: sha1:32e7b41c01d2d7accccb1f79248b9e1c56c816f3 - pristine_git_object: 09d934ed3db66ecbd5ab8e3406c3ffb8a1c3c606 + last_write_checksum: sha1:c4c62ef9cdf9bb08463bcb12919abd98ceb8d344 + pristine_git_object: 80581cc10a8e7555546e38c8b7068a2744eb552b src/mistralai/models/conversationresponse.py: id: 016ec02abd32 last_write_checksum: sha1:37c3f143b83939b369fe8637932974d163da3c37 pristine_git_object: ff318e35ee63e43c64e504301236327374442a16 src/mistralai/models/conversationrestartrequest.py: id: 2a8207f159f5 - last_write_checksum: sha1:8f53b5faba0b19d8fdf22388c72eb2580ee121f6 - pristine_git_object: a9c8410c7b1010780bf1d98b1580453aeef07509 + last_write_checksum: sha1:93cd4370afe6a06b375e0e54ca09225e02fc42d3 + pristine_git_object: 6f21d01267481b8b47d4d37609ac131c34c10a9b src/mistralai/models/conversationrestartstreamrequest.py: id: d98d3e0c8eed - last_write_checksum: sha1:cba039d9276869be283d83218659f4bf7537b958 - pristine_git_object: 0703bb5fe6566ff15677e5f604537ab9ae2b79bd + last_write_checksum: sha1:90f295ce27ba55d58899e06a29af223a464f5a4c + pristine_git_object: 2cec7958ab31378d480f0f93a5ed75ac8c624442 src/mistralai/models/conversationstreamrequest.py: id: f7051f125d44 - last_write_checksum: sha1:7ce5ab24500754f4c4f36fd07934fe992d7bbb2e - pristine_git_object: 6ff56e1786e7342284bac0fb4b669806cee55c0f + last_write_checksum: sha1:12bc85a14f110f5c8a3149540668bea178995fae + pristine_git_object: 1a481b77f706db7101521756c7c3476eaa1918c5 src/mistralai/models/conversationusageinfo.py: id: 922894aa994b last_write_checksum: sha1:0e0039421d7291ecbbf820ea843031c50371dd9e @@ -2309,8 +2381,8 @@ trackedFiles: pristine_git_object: 
708d40ab993f93227b9795c745383ab954c1c89c src/mistralai/models/files_api_routes_list_filesop.py: id: 865dd74c577c - last_write_checksum: sha1:df0af95515546660ec9ff343c17f0b2dfe8b0375 - pristine_git_object: 9b9422b405ba967d7f6ed84196fe8e1dc9c5d95f + last_write_checksum: sha1:d75afa1ee7e34cbcfb8da78e3b5c9384b684b89b + pristine_git_object: 84d61b9b4d7032a60e3055b683a396e53b625274 src/mistralai/models/files_api_routes_retrieve_fileop.py: id: d821f72ee198 last_write_checksum: sha1:d0d07123fd941bb99a00a36e87bc7ab4c21506a6 @@ -2613,12 +2685,12 @@ trackedFiles: pristine_git_object: 28cfd22dc3d567aa4ae55cc19ad89341fa9c96a1 src/mistralai/models/mistralpromptmode.py: id: b2580604c1fe - last_write_checksum: sha1:1ac4d9fb8fbf0b21958be5483a569da7f1f49ff0 - pristine_git_object: ee82fb6d056e2d9699628698750e68b4ab6ef851 + last_write_checksum: sha1:71cf04622681998b091f51e4157463109761333f + pristine_git_object: dfb6f2d2a76fd2749d91397752a38b333bae8b02 src/mistralai/models/modelcapabilities.py: id: a9589b97b15c - last_write_checksum: sha1:d7a7d530750418a54a5fc1698d855df7a519a45c - pristine_git_object: 4b5d5da7da9573f998e977e8a14a9b8f8cbf4f55 + last_write_checksum: sha1:56ea040fb631f0825e9ce2c7b32de2c90f6923a1 + pristine_git_object: 6edf8e5bf238b91a245db3489f09ae24506103f3 src/mistralai/models/modelconversation.py: id: 7d8b7b8d62a8 last_write_checksum: sha1:b76cc407f807c19c1ff5602f7dd1d0421db2486d @@ -2683,6 +2755,26 @@ trackedFiles: id: 54d1c125ef83 last_write_checksum: sha1:475749250ada2566c5a5d769eda1d350ddd8be8f pristine_git_object: e67bfa865dcf94656a67f8612a5420f8b43cc0ec + src/mistralai/models/realtimetranscriptionerror.py: + id: f869fd6faf74 + last_write_checksum: sha1:17f78beea9e1821eed90c8a2412aadf953e17774 + pristine_git_object: 0785f7001aeaba7904120a62d569a35b7ee88a80 + src/mistralai/models/realtimetranscriptionerrordetail.py: + id: d106a319e66b + last_write_checksum: sha1:16e0fea1a3be85dfea6f2c44a53a15a3dc322b4c + pristine_git_object: cb5d73f861ce053a17b66695d2b56bafe1eeb03e + src/mistralai/models/realtimetranscriptionsession.py: + id: 48c7076e6ede + last_write_checksum: sha1:ae722fc946adf7282fd79c3a2c80fb53acc70ef2 + pristine_git_object: bcd0cfe37600b80e59cd50bd0edac3444be34fdb + src/mistralai/models/realtimetranscriptionsessioncreated.py: + id: 24825bcd61b2 + last_write_checksum: sha1:81f840757637e678c4512765ba8fda060f5af8cb + pristine_git_object: 9a2c2860d1538f03e795c62754244131820e2d44 + src/mistralai/models/realtimetranscriptionsessionupdated.py: + id: 5575fb5d1980 + last_write_checksum: sha1:a2d8d5947ba6b46dcd9a0a1e377067dbb92bfdf1 + pristine_git_object: ad1b513364f5d8d2f92fbc012509bf7567fa4573 src/mistralai/models/referencechunk.py: id: 6cdbb4e60749 last_write_checksum: sha1:48a4dddda06aadd16f6ea34c58848430bd561432 @@ -3022,7 +3114,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "function_calling": true, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "function_calling": true, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": 
false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} "422": application/json: {} userExample: @@ -3031,7 +3123,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} delete_model_v1_models__model_id__delete: speakeasy-default-delete-model-v1-models-model-id-delete: parameters: @@ -3774,19 +3866,52 @@ examples: parameters: path: agent_id: "" - version: 788393 + version: "788393" responses: "200": application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model 3", "name": "", "object": "agent", "id": "", "version": 377706, "versions": [658369, 642981], "created_at": "2024-10-02T23:01:15.980Z", "updated_at": "2026-12-22T00:55:26.568Z", "deployment_chat": false, "source": ""} "422": application/json: {} + agents_api_v1_agents_create_or_update_alias: + speakeasy-default-agents-api-v1-agents-create-or-update-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + version: 595141 + responses: + "200": + application/json: {"alias": "", "version": 768764, "created_at": "2026-12-28T00:40:21.715Z", "updated_at": "2025-09-01T12:54:58.254Z"} + "422": + application/json: {} + agents_api_v1_agents_list_version_aliases: + speakeasy-default-agents-api-v1-agents-list-version-aliases: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: [{"alias": "", "version": 318290, "created_at": "2025-10-02T20:25:32.322Z", "updated_at": "2026-11-19T02:58:37.894Z"}] + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} releaseNotes: | ## Python SDK Changes: - * `mistral.beta.agents.list_versions()`: **Added** - * `mistral.beta.agents.get_version()`: **Added** - * `mistral.ocr.process()`: `request.document_annotation_prompt` **Added** + * `mistral.beta.conversations.restart_stream()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.start()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.list()`: `response.[].[agent_conversation].agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.get()`: `response.[agent_conversation].agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.restart()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.start_stream()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.agents.get()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.agents.get_version()`: `request.version` **Changed** **Breaking** :warning: + 
* `mistral.beta.agents.list_version_aliases()`: **Added** + * `mistral.models.list()`: `response.data.[].[fine-tuned].capabilities.audio_transcription` **Added** + * `mistral.models.retrieve()`: `response.[base].capabilities.audio_transcription` **Added** + * `mistral.beta.agents.create_version_alias()`: **Added** + * `mistral.files.list()`: `request.mimetypes` **Added** generatedFiles: - .gitattributes - .vscode/settings.json diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 4f9a9747..0cc6f059 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -26,7 +26,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.11.1 + version: 1.12.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 89c966c7..3bb067a0 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:fd94dc1f574f3bb88a839543675b2c3b3aa895519ec2797efb143ead830ae982 - sourceBlobDigest: sha256:2dd0ee9d000907ffe699cdd48a18163b88297d0ce63f2cdc05efa35cee136bc0 + sourceRevisionDigest: sha256:56bcbb02148ddfabd64cb2dced1a9efe0f00d0fa106435fdd0fb2a889c1c6fed + sourceBlobDigest: sha256:c014d9220f14e04b573acf291c173954b8d34d03d852877a91756afb68ccc65b tags: - latest - - speakeasy-sdk-regen-1768506286 + - speakeasy-sdk-regen-1769979831 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:fd94dc1f574f3bb88a839543675b2c3b3aa895519ec2797efb143ead830ae982 - sourceBlobDigest: sha256:2dd0ee9d000907ffe699cdd48a18163b88297d0ce63f2cdc05efa35cee136bc0 + sourceRevisionDigest: sha256:56bcbb02148ddfabd64cb2dced1a9efe0f00d0fa106435fdd0fb2a889c1c6fed + sourceBlobDigest: sha256:c014d9220f14e04b573acf291c173954b8d34d03d852877a91756afb68ccc65b codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:881a74af90c5678411207a0a9b0e370496d44b18174e96ba7c6812d400788637 + codeSamplesRevisionDigest: sha256:feb7bf2f6fab8456316453c7e14eda6201fe8649fe0ffcdb1eaa4580cc66a51e workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 diff --git a/README.md b/README.md index 2569d112..131ce557 100644 --- a/README.md +++ b/README.md @@ -475,6 +475,8 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. * [list_versions](docs/sdks/mistralagents/README.md#list_versions) - List all versions of an agent. * [get_version](docs/sdks/mistralagents/README.md#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](docs/sdks/mistralagents/README.md#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](docs/sdks/mistralagents/README.md#list_version_aliases) - List all aliases for an agent. ### [Beta.Conversations](docs/sdks/conversations/README.md) @@ -752,7 +754,7 @@ with Mistral( **Inherit from [`MistralError`](./src/mistralai/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 50 of 72 methods.* +* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. 
Applicable to 52 of 74 methods.* * [`ResponseValidationError`](./src/mistralai/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. diff --git a/RELEASES.md b/RELEASES.md index 984e9145..90f534ef 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -358,4 +358,14 @@ Based on: ### Generated - [python v1.11.1] . ### Releases -- [PyPI v1.11.1] https://pypi.org/project/mistralai/1.11.1 - . \ No newline at end of file +- [PyPI v1.11.1] https://pypi.org/project/mistralai/1.11.1 - . + +## 2026-02-01 21:20:42 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.685.0 (2.794.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.12.0] . +### Releases +- [PyPI v1.12.0] https://pypi.org/project/mistralai/1.12.0 - . \ No newline at end of file diff --git a/docs/models/agentaliasresponse.md b/docs/models/agentaliasresponse.md new file mode 100644 index 00000000..aa531ec5 --- /dev/null +++ b/docs/models/agentaliasresponse.md @@ -0,0 +1,11 @@ +# AgentAliasResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md index 92fd673c..a2d61731 100644 --- a/docs/models/agentconversation.md +++ b/docs/models/agentconversation.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| -| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentConversationAgentVersion]](../models/agentconversationagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversationagentversion.md b/docs/models/agentconversationagentversion.md new file mode 100644 index 00000000..668a8dc0 --- /dev/null +++ b/docs/models/agentconversationagentversion.md @@ -0,0 +1,17 @@ +# AgentConversationAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md new file mode 100644 index 00000000..79406434 --- /dev/null +++ b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md @@ -0,0 +1,10 @@ +# AgentsAPIV1AgentsCreateOrUpdateAliasRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md index 825e03a0..c71d4419 100644 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | 
----------------------- | ----------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.QueryParamAgentVersion]](../models/queryparamagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetversionrequest.md b/docs/models/agentsapiv1agentsgetversionrequest.md index 7617d274..96a73589 100644 --- a/docs/models/agentsapiv1agentsgetversionrequest.md +++ b/docs/models/agentsapiv1agentsgetversionrequest.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `agent_id` | *str* | :heavy_check_mark: | N/A | -| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| `version` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionaliasesrequest.md b/docs/models/agentsapiv1agentslistversionaliasesrequest.md new file mode 100644 index 00000000..3083bf92 --- /dev/null +++ b/docs/models/agentsapiv1agentslistversionaliasesrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsListVersionAliasesRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentversion.md b/docs/models/agentversion.md new file mode 100644 index 00000000..fd4b6a3e --- /dev/null +++ b/docs/models/agentversion.md @@ -0,0 +1,17 @@ +# AgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md index 04378ae3..2b4ff8ef 100644 --- a/docs/models/conversationrequest.md +++ b/docs/models/conversationrequest.md @@ -16,5 +16,5 @@ | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.AgentVersion]](../models/agentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index f389a1e5..d9865312 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -14,4 +14,4 @@ Request to restart a new conversation from a given entry in the conversation. 
| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file +| `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file diff --git a/docs/models/conversationrestartrequestagentversion.md b/docs/models/conversationrestartrequestagentversion.md new file mode 100644 index 00000000..019ba301 --- /dev/null +++ b/docs/models/conversationrestartrequestagentversion.md @@ -0,0 +1,19 @@ +# ConversationRestartRequestAgentVersion + +Specific version of the agent to use when restarting. If not provided, uses the current version. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index d7358dc2..a5f8cbe7 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -14,4 +14,4 @@ Request to restart a new conversation from a given entry in the conversation. | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file +| `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequestagentversion.md b/docs/models/conversationrestartstreamrequestagentversion.md new file mode 100644 index 00000000..9e006300 --- /dev/null +++ b/docs/models/conversationrestartstreamrequestagentversion.md @@ -0,0 +1,19 @@ +# ConversationRestartStreamRequestAgentVersion + +Specific version of the agent to use when restarting. If not provided, uses the current version. 
+ + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md index e403db68..299346f8 100644 --- a/docs/models/conversationstreamrequest.md +++ b/docs/models/conversationstreamrequest.md @@ -16,5 +16,5 @@ | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationStreamRequestAgentVersion]](../models/conversationstreamrequestagentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationstreamrequestagentversion.md b/docs/models/conversationstreamrequestagentversion.md new file mode 100644 index 00000000..52ee9672 --- /dev/null +++ b/docs/models/conversationstreamrequestagentversion.md @@ -0,0 +1,17 @@ +# ConversationStreamRequestAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md index 3801a96e..57d11722 100644 --- a/docs/models/filesapirouteslistfilesrequest.md +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -11,4 +11,5 @@ | `sample_type` | List[[models.SampleType](../models/sampletype.md)] | :heavy_minus_sign: | N/A | | `source` | List[[models.Source](../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `mimetypes` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/message.md b/docs/models/message.md new file mode 100644 index 00000000..752f04a8 --- /dev/null +++ b/docs/models/message.md @@ -0,0 +1,19 @@ +# Message + +Human-readable error message. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + diff --git a/docs/models/mistralpromptmode.md b/docs/models/mistralpromptmode.md index 7416e203..c3409d03 100644 --- a/docs/models/mistralpromptmode.md +++ b/docs/models/mistralpromptmode.md @@ -1,5 +1,9 @@ # MistralPromptMode +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+ ## Values diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 646c8e94..c7dd2710 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -3,14 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `ocr` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `moderation` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `audio` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------- | --------------------- | --------------------- | --------------------- | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `ocr` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `moderation` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio_transcription` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/queryparamagentversion.md b/docs/models/queryparamagentversion.md new file mode 100644 index 00000000..3eb5ef18 --- /dev/null +++ b/docs/models/queryparamagentversion.md @@ -0,0 +1,17 @@ +# QueryParamAgentVersion + + +## Supported Types + +### `int` + +```python +value: int = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/realtimetranscriptionerror.md b/docs/models/realtimetranscriptionerror.md new file mode 100644 index 00000000..e01f2126 --- /dev/null +++ b/docs/models/realtimetranscriptionerror.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionError + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["error"]]* | :heavy_minus_sign: | N/A | +| `error` | [models.RealtimeTranscriptionErrorDetail](../models/realtimetranscriptionerrordetail.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionerrordetail.md b/docs/models/realtimetranscriptionerrordetail.md new file mode 100644 index 00000000..96420ada --- /dev/null +++ b/docs/models/realtimetranscriptionerrordetail.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionErrorDetail + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | 
-------------------------------------- | +| `message` | [models.Message](../models/message.md) | :heavy_check_mark: | Human-readable error message. | +| `code` | *int* | :heavy_check_mark: | Internal error code for debugging. | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsession.md b/docs/models/realtimetranscriptionsession.md new file mode 100644 index 00000000..94a0a89e --- /dev/null +++ b/docs/models/realtimetranscriptionsession.md @@ -0,0 +1,10 @@ +# RealtimeTranscriptionSession + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `request_id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `audio_format` | [models.AudioFormat](../models/audioformat.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessioncreated.md b/docs/models/realtimetranscriptionsessioncreated.md new file mode 100644 index 00000000..34e603fd --- /dev/null +++ b/docs/models/realtimetranscriptionsessioncreated.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionCreated + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.created"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSession](../models/realtimetranscriptionsession.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessionupdated.md b/docs/models/realtimetranscriptionsessionupdated.md new file mode 100644 index 00000000..7e271995 --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdated.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdated + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.updated"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSession](../models/realtimetranscriptionsession.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index c488848c..ca383176 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -60,7 +60,7 @@ with Mistral( | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.AgentVersion]](../../models/agentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | 
[Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -366,7 +366,7 @@ with Mistral( | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | +| `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -431,7 +431,7 @@ with Mistral( | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationStreamRequestAgentVersion]](../../models/conversationstreamrequestagentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -547,7 +547,7 @@ with Mistral( | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | +| `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index f0dfd593..57b53fc7 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -95,6 +95,7 @@ with Mistral( | `source` | List[[models.Source](../../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `purpose` | [OptionalNullable[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `mimetypes` | List[*str*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 8021fa07..bdd8d588 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -14,6 +14,8 @@ * [update_version](#update_version) - Update an agent version. * [list_versions](#list_versions) - List all versions of an agent. * [get_version](#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](#list_version_aliases) - List all aliases for an agent. ## create @@ -116,7 +118,7 @@ with Mistral( ## get -Given an agent retrieve an agent entity with its attributes. +Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. ### Example Usage @@ -139,11 +141,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.QueryParamAgentVersion]](../../models/queryparamagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -337,7 +339,7 @@ Get a specific agent version by version number. 
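The `agent_version` parameter on the get operation above now accepts either member of the new union type. A minimal sketch of both forms (the agent id `"ag_0123"` and the alias `"production"` are placeholder values for illustration, not part of this patch):

```python
from mistralai import Mistral
import os


with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Pin to an exact numbered version with an int...
    agent_v2 = mistral.beta.agents.get(agent_id="ag_0123", agent_version=2)

    # ...or resolve a named alias with a str.
    agent_prod = mistral.beta.agents.get(agent_id="ag_0123", agent_version="production")

    print(agent_v2, agent_prod)
```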
### Example Usage - + ```python from mistralai import Mistral import os @@ -347,7 +349,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.get_version(agent_id="", version=788393) + res = mistral.beta.agents.get_version(agent_id="", version="788393") # Handle response print(res) @@ -359,7 +361,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `agent_id` | *str* | :heavy_check_mark: | N/A | -| `version` | *int* | :heavy_check_mark: | N/A | +| `version` | *str* | :heavy_check_mark: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -368,6 +370,90 @@ with Mistral( ### Errors +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## create_version_alias + +Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. + +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=595141) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.AgentAliasResponse](../../models/agentaliasresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## list_version_aliases + +Retrieve all version aliases for a specific agent. 
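The two alias operations compose naturally. A minimal round-trip sketch, assuming a placeholder agent id, alias name, and version number:

```python
from mistralai import Mistral
import os


with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Point the alias "staging" at version 3; repeating the call with a
    # different version reassigns the alias, since aliases are unique per agent.
    mistral.beta.agents.create_version_alias(
        agent_id="ag_0123", alias="staging", version=3
    )

    # Enumerate every alias currently defined on the agent.
    for entry in mistral.beta.agents.list_version_aliases(agent_id="ag_0123"):
        print(entry.alias, "->", entry.version)
```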
+ +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list_version_aliases(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.AgentAliasResponse]](../../models/agentaliasresponse.md)** + +### Errors + | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4XX, 5XX | \*/\* | diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index aae7598d..6ee91593 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.11.1" +__version__: str = "1.12.0" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.11.1 2.794.1 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.12.0 2.794.1 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 93ed8c28..194cb4c0 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -259,7 +259,12 @@ def start( description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrequest.AgentVersion, + models_conversationrequest.AgentVersionTypedDict, + ] + ] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -405,7 +410,12 @@ async def start_async( description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrequest.AgentVersion, + models_conversationrequest.AgentVersionTypedDict, + ] + ] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -1711,7 +1721,12 @@ def restart( ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartrequest.ConversationRestartRequestAgentVersion, + models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1846,7 +1861,12 @@ async def restart_async( ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ +
Union[ + models_conversationrestartrequest.ConversationRestartRequestAgentVersion, + models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1991,7 +2011,12 @@ def start_stream( description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationstreamrequest.ConversationStreamRequestAgentVersion, + models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -2148,7 +2173,12 @@ async def start_stream_async( description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationstreamrequest.ConversationStreamRequestAgentVersion, + models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -2561,7 +2591,12 @@ def restart_stream( ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2703,7 +2738,12 @@ async def restart_stream_async( ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, diff --git a/src/mistralai/files.py b/src/mistralai/files.py index ab2c75a2..90ada0ff 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -241,6 +241,7 @@ def list( source: OptionalNullable[List[models_source.Source]] = UNSET, search: OptionalNullable[str] = UNSET, purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -257,6 +258,7 @@ def list( :param source: :param search: :param purpose: + :param mimetypes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -280,6 +282,7 @@ def list( source=source, search=search, purpose=purpose, + mimetypes=mimetypes, ) req = self._build_request( @@ -343,6 +346,7 @@ async def list_async( source: 
OptionalNullable[List[models_source.Source]] = UNSET, search: OptionalNullable[str] = UNSET, purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -359,6 +363,7 @@ async def list_async( :param source: :param search: :param purpose: + :param mimetypes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -382,6 +387,7 @@ async def list_async( source=source, search=search, purpose=purpose, + mimetypes=mimetypes, ) req = self._build_request_async( diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index e4abf6e4..7fb0ce25 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.models import ( agentcreationrequest as models_agentcreationrequest, + agents_api_v1_agents_getop as models_agents_api_v1_agents_getop, agentupdaterequest as models_agentupdaterequest, completionargs as models_completionargs, requestsource as models_requestsource, @@ -494,7 +495,12 @@ def get( self, *, agent_id: str, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_agents_api_v1_agents_getop.QueryParamAgentVersion, + models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -502,7 +508,7 @@ def get( ) -> models.Agent: r"""Retrieve an agent entity. - Given an agent retrieve an agent entity with its attributes. + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. :param agent_id: :param agent_version: @@ -587,7 +593,12 @@ async def get_async( self, *, agent_id: str, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_agents_api_v1_agents_getop.QueryParamAgentVersion, + models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -595,7 +606,7 @@ async def get_async( ) -> models.Agent: r"""Retrieve an agent entity. - Given an agent retrieve an agent entity with its attributes. + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. 
:param agent_id: :param agent_version: @@ -1514,7 +1525,7 @@ def get_version( self, *, agent_id: str, - version: int, + version: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1548,7 +1559,7 @@ def get_version( req = self._build_request( method="GET", - path="/v1/agents/{agent_id}/version/{version}", + path="/v1/agents/{agent_id}/versions/{version}", base_url=base_url, url_variables=url_variables, request=request, @@ -1607,7 +1618,7 @@ async def get_version_async( self, *, agent_id: str, - version: int, + version: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1641,7 +1652,7 @@ async def get_version_async( req = self._build_request_async( method="GET", - path="/v1/agents/{agent_id}/version/{version}", + path="/v1/agents/{agent_id}/versions/{version}", base_url=base_url, url_variables=url_variables, request=request, @@ -1695,3 +1706,375 @@ async def get_version_async( raise models.SDKError("API error occurred", http_res, http_res_text) raise models.SDKError("Unexpected response received", http_res) + + def create_version_alias( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. + + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_version_alias_async( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. + + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request_async( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list_version_aliases( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_version_aliases_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index c35b3d24..23e65222 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -14,8 +14,11 @@ AgentToolsTypedDict, AgentTypedDict, ) + from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict from .agentconversation import ( AgentConversation, + AgentConversationAgentVersion, + AgentConversationAgentVersionTypedDict, AgentConversationObject, AgentConversationTypedDict, ) @@ -41,6 +44,10 @@ AgentHandoffStartedEventType, AgentHandoffStartedEventTypedDict, ) + from .agents_api_v1_agents_create_or_update_aliasop import ( + AgentsAPIV1AgentsCreateOrUpdateAliasRequest, + AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, + ) from .agents_api_v1_agents_deleteop import ( AgentsAPIV1AgentsDeleteRequest, AgentsAPIV1AgentsDeleteRequestTypedDict, @@ -52,6 +59,12 @@ from .agents_api_v1_agents_getop import ( AgentsAPIV1AgentsGetRequest, AgentsAPIV1AgentsGetRequestTypedDict, + QueryParamAgentVersion, + QueryParamAgentVersionTypedDict, + ) + from .agents_api_v1_agents_list_version_aliasesop import ( + AgentsAPIV1AgentsListVersionAliasesRequest, + AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, ) from 
.agents_api_v1_agents_list_versionsop import ( AgentsAPIV1AgentsListVersionsRequest, @@ -340,6 +353,8 @@ ConversationMessagesTypedDict, ) from .conversationrequest import ( + AgentVersion, + AgentVersionTypedDict, ConversationRequest, ConversationRequestTypedDict, HandoffExecution, @@ -355,16 +370,22 @@ ) from .conversationrestartrequest import ( ConversationRestartRequest, + ConversationRestartRequestAgentVersion, + ConversationRestartRequestAgentVersionTypedDict, ConversationRestartRequestHandoffExecution, ConversationRestartRequestTypedDict, ) from .conversationrestartstreamrequest import ( ConversationRestartStreamRequest, + ConversationRestartStreamRequestAgentVersion, + ConversationRestartStreamRequestAgentVersionTypedDict, ConversationRestartStreamRequestHandoffExecution, ConversationRestartStreamRequestTypedDict, ) from .conversationstreamrequest import ( ConversationStreamRequest, + ConversationStreamRequestAgentVersion, + ConversationStreamRequestAgentVersionTypedDict, ConversationStreamRequestHandoffExecution, ConversationStreamRequestTools, ConversationStreamRequestToolsTypedDict, @@ -758,6 +779,28 @@ from .paginationinfo import PaginationInfo, PaginationInfoTypedDict from .prediction import Prediction, PredictionTypedDict from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .realtimetranscriptionerror import ( + RealtimeTranscriptionError, + RealtimeTranscriptionErrorTypedDict, + ) + from .realtimetranscriptionerrordetail import ( + Message, + MessageTypedDict, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, + ) + from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, + ) + from .realtimetranscriptionsessioncreated import ( + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionCreatedTypedDict, + ) + from .realtimetranscriptionsessionupdated import ( + RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdatedTypedDict, + ) from .referencechunk import ( ReferenceChunk, ReferenceChunkType, @@ -951,7 +994,11 @@ __all__ = [ "APIEndpoint", "Agent", + "AgentAliasResponse", + "AgentAliasResponseTypedDict", "AgentConversation", + "AgentConversationAgentVersion", + "AgentConversationAgentVersionTypedDict", "AgentConversationObject", "AgentConversationTypedDict", "AgentCreationRequest", @@ -976,6 +1023,10 @@ "AgentUpdateRequestTools", "AgentUpdateRequestToolsTypedDict", "AgentUpdateRequestTypedDict", + "AgentVersion", + "AgentVersionTypedDict", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", "AgentsAPIV1AgentsDeleteRequest", "AgentsAPIV1AgentsDeleteRequestTypedDict", "AgentsAPIV1AgentsGetRequest", @@ -984,6 +1035,8 @@ "AgentsAPIV1AgentsGetVersionRequestTypedDict", "AgentsAPIV1AgentsListRequest", "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionAliasesRequest", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", "AgentsAPIV1AgentsListVersionsRequest", "AgentsAPIV1AgentsListVersionsRequestTypedDict", "AgentsAPIV1AgentsUpdateRequest", @@ -1184,12 +1237,18 @@ "ConversationResponseObject", "ConversationResponseTypedDict", "ConversationRestartRequest", + "ConversationRestartRequestAgentVersion", + "ConversationRestartRequestAgentVersionTypedDict", "ConversationRestartRequestHandoffExecution", "ConversationRestartRequestTypedDict", "ConversationRestartStreamRequest", + "ConversationRestartStreamRequestAgentVersion", + 
"ConversationRestartStreamRequestAgentVersionTypedDict", "ConversationRestartStreamRequestHandoffExecution", "ConversationRestartStreamRequestTypedDict", "ConversationStreamRequest", + "ConversationStreamRequestAgentVersion", + "ConversationStreamRequestAgentVersionTypedDict", "ConversationStreamRequestHandoffExecution", "ConversationStreamRequestTools", "ConversationStreamRequestToolsTypedDict", @@ -1431,6 +1490,7 @@ "ListSharingOutTypedDict", "Loc", "LocTypedDict", + "Message", "MessageEntries", "MessageEntriesTypedDict", "MessageInputContentChunks", @@ -1456,6 +1516,7 @@ "MessageOutputEventRole", "MessageOutputEventType", "MessageOutputEventTypedDict", + "MessageTypedDict", "Messages", "MessagesTypedDict", "MetricOut", @@ -1506,7 +1567,19 @@ "PredictionTypedDict", "ProcessingStatusOut", "ProcessingStatusOutTypedDict", + "QueryParamAgentVersion", + "QueryParamAgentVersionTypedDict", "QueryParamStatus", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionErrorDetailTypedDict", + "RealtimeTranscriptionErrorTypedDict", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionCreatedTypedDict", + "RealtimeTranscriptionSessionTypedDict", + "RealtimeTranscriptionSessionUpdated", + "RealtimeTranscriptionSessionUpdatedTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", @@ -1675,7 +1748,11 @@ "AgentTools": ".agent", "AgentToolsTypedDict": ".agent", "AgentTypedDict": ".agent", + "AgentAliasResponse": ".agentaliasresponse", + "AgentAliasResponseTypedDict": ".agentaliasresponse", "AgentConversation": ".agentconversation", + "AgentConversationAgentVersion": ".agentconversation", + "AgentConversationAgentVersionTypedDict": ".agentconversation", "AgentConversationObject": ".agentconversation", "AgentConversationTypedDict": ".agentconversation", "AgentCreationRequest": ".agentcreationrequest", @@ -1692,12 +1769,18 @@ "AgentHandoffStartedEvent": ".agenthandoffstartedevent", "AgentHandoffStartedEventType": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "QueryParamAgentVersion": ".agents_api_v1_agents_getop", + "QueryParamAgentVersionTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", @@ -1913,6 +1996,8 @@ "ConversationMessages": ".conversationmessages", "ConversationMessagesObject": ".conversationmessages", "ConversationMessagesTypedDict": ".conversationmessages", + "AgentVersion": 
".conversationrequest", + "AgentVersionTypedDict": ".conversationrequest", "ConversationRequest": ".conversationrequest", "ConversationRequestTypedDict": ".conversationrequest", "HandoffExecution": ".conversationrequest", @@ -1924,12 +2009,18 @@ "Outputs": ".conversationresponse", "OutputsTypedDict": ".conversationresponse", "ConversationRestartRequest": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", "ConversationRestartRequestHandoffExecution": ".conversationrestartrequest", "ConversationRestartRequestTypedDict": ".conversationrestartrequest", "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersion": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", "ConversationStreamRequest": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", "ConversationStreamRequestTools": ".conversationstreamrequest", "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", @@ -2237,6 +2328,18 @@ "PredictionTypedDict": ".prediction", "ProcessingStatusOut": ".processingstatusout", "ProcessingStatusOutTypedDict": ".processingstatusout", + "RealtimeTranscriptionError": ".realtimetranscriptionerror", + "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", + "Message": ".realtimetranscriptionerrordetail", + "MessageTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionSession": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdatedTypedDict": ".realtimetranscriptionsessionupdated", "ReferenceChunk": ".referencechunk", "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", diff --git a/src/mistralai/models/agentaliasresponse.py b/src/mistralai/models/agentaliasresponse.py new file mode 100644 index 00000000..c0928da9 --- /dev/null +++ b/src/mistralai/models/agentaliasresponse.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class AgentAliasResponseTypedDict(TypedDict): + alias: str + version: int + created_at: datetime + updated_at: datetime + + +class AgentAliasResponse(BaseModel): + alias: str + + version: int + + created_at: datetime + + updated_at: datetime diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py index 625fb4fc..6007b571 100644 --- a/src/mistralai/models/agentconversation.py +++ b/src/mistralai/models/agentconversation.py @@ -4,13 +4,23 @@ from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict AgentConversationObject = Literal["conversation",] +AgentConversationAgentVersionTypedDict = TypeAliasType( + "AgentConversationAgentVersionTypedDict", Union[str, int] +) + + +AgentConversationAgentVersion = TypeAliasType( + "AgentConversationAgentVersion", Union[str, int] +) + + class AgentConversationTypedDict(TypedDict): id: str created_at: datetime @@ -23,7 +33,7 @@ class AgentConversationTypedDict(TypedDict): metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" object: NotRequired[AgentConversationObject] - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]] class AgentConversation(BaseModel): @@ -46,7 +56,7 @@ class AgentConversation(BaseModel): object: Optional[AgentConversationObject] = "conversation" - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py new file mode 100644 index 00000000..6cf9d0e0 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): + agent_id: str + alias: str + version: int + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + alias: Annotated[ + str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/models/agents_api_v1_agents_get_versionop.py index 4463d3b2..fddb10dd 100644 --- a/src/mistralai/models/agents_api_v1_agents_get_versionop.py +++ b/src/mistralai/models/agents_api_v1_agents_get_versionop.py @@ -8,7 +8,7 @@ class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): agent_id: str - version: int + version: str class AgentsAPIV1AgentsGetVersionRequest(BaseModel): @@ -17,5 +17,5 @@ class AgentsAPIV1AgentsGetVersionRequest(BaseModel): ] version: Annotated[ - int, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py index dced6dbb..2b7d89a5 100644 --- a/src/mistralai/models/agents_api_v1_agents_getop.py +++ b/src/mistralai/models/agents_api_v1_agents_getop.py @@ -4,12 +4,21 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict +from typing import Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +QueryParamAgentVersionTypedDict = TypeAliasType( + "QueryParamAgentVersionTypedDict", Union[int, str] +) + + +QueryParamAgentVersion = TypeAliasType("QueryParamAgentVersion", Union[int, str]) class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): agent_id: str - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[Nullable[QueryParamAgentVersionTypedDict]] class AgentsAPIV1AgentsGetRequest(BaseModel): @@ -18,7 +27,7 @@ class AgentsAPIV1AgentsGetRequest(BaseModel): ] agent_version: Annotated[ - OptionalNullable[int], + OptionalNullable[QueryParamAgentVersion], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py new file mode 100644 index 00000000..650a7187 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py index 09d934ed..80581cc1 100644 --- a/src/mistralai/models/conversationrequest.py +++ b/src/mistralai/models/conversationrequest.py @@ -48,6 +48,12 @@ ] +AgentVersionTypedDict = TypeAliasType("AgentVersionTypedDict", Union[str, int]) + + +AgentVersion = TypeAliasType("AgentVersion", Union[str, int]) + + class ConversationRequestTypedDict(TypedDict): inputs: ConversationInputsTypedDict stream: NotRequired[bool] @@ -61,7 +67,7 @@ class ConversationRequestTypedDict(TypedDict): description: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[Nullable[AgentVersionTypedDict]] model: NotRequired[Nullable[str]] @@ -89,7 +95,7 @@ class ConversationRequest(BaseModel): agent_id: OptionalNullable[str] = UNSET - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[AgentVersion] = UNSET model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py index a9c8410c..6f21d012 100644 --- a/src/mistralai/models/conversationrestartrequest.py +++ b/src/mistralai/models/conversationrestartrequest.py @@ -5,8 +5,8 @@ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ConversationRestartRequestHandoffExecution = Literal[ @@ -15,6 +15,18 @@ ] +ConversationRestartRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartRequestAgentVersion = TypeAliasType( + "ConversationRestartRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + class ConversationRestartRequestTypedDict(TypedDict): r"""Request to restart a new conversation from a given entry in the conversation.""" @@ -28,7 +40,9 @@ class ConversationRestartRequestTypedDict(TypedDict): r"""White-listed arguments from the completion API""" metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[ + Nullable[ConversationRestartRequestAgentVersionTypedDict] + ] r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" @@ -52,7 +66,7 @@ class ConversationRestartRequest(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[ConversationRestartRequestAgentVersion] = UNSET r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" @model_serializer(mode="wrap") diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py index 0703bb5f..2cec7958 100644 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ b/src/mistralai/models/conversationrestartstreamrequest.py @@ -5,8 +5,8 @@ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ConversationRestartStreamRequestHandoffExecution = Literal[ @@ -15,6 +15,18 @@ ] +ConversationRestartStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartStreamRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartStreamRequestAgentVersion = TypeAliasType( + "ConversationRestartStreamRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + class ConversationRestartStreamRequestTypedDict(TypedDict): r"""Request to restart a new conversation from a given entry in the conversation.""" @@ -28,7 +40,9 @@ class ConversationRestartStreamRequestTypedDict(TypedDict): r"""White-listed arguments from the completion API""" metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[ + Nullable[ConversationRestartStreamRequestAgentVersionTypedDict] + ] r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" @@ -54,7 +68,9 @@ class ConversationRestartStreamRequest(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[ConversationRestartStreamRequestAgentVersion] = ( + UNSET + ) r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" @model_serializer(mode="wrap") diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py index 6ff56e17..1a481b77 100644 --- a/src/mistralai/models/conversationstreamrequest.py +++ b/src/mistralai/models/conversationstreamrequest.py @@ -48,6 +48,16 @@ ] +ConversationStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationStreamRequestAgentVersionTypedDict", Union[str, int] +) + + +ConversationStreamRequestAgentVersion = TypeAliasType( + "ConversationStreamRequestAgentVersion", Union[str, int] +) + + class ConversationStreamRequestTypedDict(TypedDict): inputs: ConversationInputsTypedDict stream: NotRequired[bool] @@ -61,7 +71,7 @@ class ConversationStreamRequestTypedDict(TypedDict): description: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[Nullable[ConversationStreamRequestAgentVersionTypedDict]] model: NotRequired[Nullable[str]] @@ -91,7 +101,7 @@ class ConversationStreamRequest(BaseModel): agent_id: OptionalNullable[str] = UNSET - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[ConversationStreamRequestAgentVersion] = UNSET model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py index 9b9422b4..84d61b9b 100644 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ b/src/mistralai/models/files_api_routes_list_filesop.py @@ -19,6 +19,7 @@ class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): source: NotRequired[Nullable[List[Source]]] search: NotRequired[Nullable[str]] purpose: NotRequired[Nullable[FilePurpose]] + mimetypes: NotRequired[Nullable[List[str]]] class FilesAPIRoutesListFilesRequest(BaseModel): @@ -57,6 +58,11 @@ class FilesAPIRoutesListFilesRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET + mimetypes: Annotated[ + OptionalNullable[List[str]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -67,8 +73,9 @@ def serialize_model(self, handler): "source", "search", "purpose", + "mimetypes", ] - nullable_fields = ["sample_type", "source", "search", "purpose"] + nullable_fields = ["sample_type", "source", "search", "purpose", "mimetypes"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py index ee82fb6d..dfb6f2d2 100644 --- a/src/mistralai/models/mistralpromptmode.py +++ b/src/mistralai/models/mistralpromptmode.py @@ -6,3 +6,7 @@ MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+""" diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index 4b5d5da7..6edf8e5b 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -16,6 +16,7 @@ class ModelCapabilitiesTypedDict(TypedDict): classification: NotRequired[bool] moderation: NotRequired[bool] audio: NotRequired[bool] + audio_transcription: NotRequired[bool] class ModelCapabilities(BaseModel): @@ -36,3 +37,5 @@ class ModelCapabilities(BaseModel): moderation: Optional[bool] = False audio: Optional[bool] = False + + audio_transcription: Optional[bool] = False diff --git a/src/mistralai/models/realtimetranscriptionerror.py b/src/mistralai/models/realtimetranscriptionerror.py new file mode 100644 index 00000000..0785f700 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionerror.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionerrordetail import ( + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionErrorTypedDict(TypedDict): + error: RealtimeTranscriptionErrorDetailTypedDict + type: Literal["error"] + + +class RealtimeTranscriptionError(BaseModel): + error: RealtimeTranscriptionErrorDetail + + TYPE: Annotated[ + Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], + pydantic.Field(alias="type"), + ] = "error" diff --git a/src/mistralai/models/realtimetranscriptionerrordetail.py b/src/mistralai/models/realtimetranscriptionerrordetail.py new file mode 100644 index 00000000..cb5d73f8 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionerrordetail.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +MessageTypedDict = TypeAliasType("MessageTypedDict", Union[str, Dict[str, Any]]) +r"""Human-readable error message.""" + + +Message = TypeAliasType("Message", Union[str, Dict[str, Any]]) +r"""Human-readable error message.""" + + +class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): + message: MessageTypedDict + r"""Human-readable error message.""" + code: int + r"""Internal error code for debugging.""" + + +class RealtimeTranscriptionErrorDetail(BaseModel): + message: Message + r"""Human-readable error message.""" + + code: int + r"""Internal error code for debugging.""" diff --git a/src/mistralai/models/realtimetranscriptionsession.py b/src/mistralai/models/realtimetranscriptionsession.py new file mode 100644 index 00000000..bcd0cfe3 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionsession.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class RealtimeTranscriptionSessionTypedDict(TypedDict): + request_id: str + model: str + audio_format: AudioFormatTypedDict + + +class RealtimeTranscriptionSession(BaseModel): + request_id: str + + model: str + + audio_format: AudioFormat diff --git a/src/mistralai/models/realtimetranscriptionsessioncreated.py b/src/mistralai/models/realtimetranscriptionsessioncreated.py new file mode 100644 index 00000000..9a2c2860 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionsessioncreated.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.created"] + + +class RealtimeTranscriptionSessionCreated(BaseModel): + session: RealtimeTranscriptionSession + + TYPE: Annotated[ + Annotated[ + Optional[Literal["session.created"]], + AfterValidator(validate_const("session.created")), + ], + pydantic.Field(alias="type"), + ] = "session.created" diff --git a/src/mistralai/models/realtimetranscriptionsessionupdated.py b/src/mistralai/models/realtimetranscriptionsessionupdated.py new file mode 100644 index 00000000..ad1b5133 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionsessionupdated.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.updated"] + + +class RealtimeTranscriptionSessionUpdated(BaseModel): + session: RealtimeTranscriptionSession + + TYPE: Annotated[ + Annotated[ + Optional[Literal["session.updated"]], + AfterValidator(validate_const("session.updated")), + ], + pydantic.Field(alias="type"), + ] = "session.updated" diff --git a/uv.lock b/uv.lock index dc8f42ea..efffa7ad 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "1.11.0" +version = "1.11.1" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, From caf71b23f165f202a81afa11fdbc9d51a1f34ea5 Mon Sep 17 00:00:00 2001 From: jean-malo Date: Sun, 1 Feb 2026 22:44:58 +0100 Subject: [PATCH 02/42] feat(realtime): add realtime audio transcription support This commit adds support for realtime audio transcription using WebSocket connections. The implementation includes: 1. 
New realtime transcription client in the extra module 2. Examples for microphone and file-based transcription 3. Support for audio format negotiation 4. Proper error handling and connection management The realtime transcription feature requires the websockets package (>=13.0) which is now added as an optional dependency. This implementation allows for streaming audio data to the Mistral API and receiving transcription results in realtime. The changes include new models for realtime events and connection management, as well as updated audio.py to expose the realtime functionality. --- ...async_realtime_transcription_microphone.py | 225 +++++++++++++++ .../async_realtime_transcription_stream.py | 144 ++++++++++ examples/mistral/audio/chat_base64.py | 19 +- examples/mistral/audio/chat_no_streaming.py | 19 +- examples/mistral/audio/chat_streaming.py | 24 +- .../audio/transcription_diarize_async.py | 28 ++ .../async_batch_job_chat_completion_inline.py | 1 - pyproject.toml | 3 + scripts/run_examples.sh | 3 + src/mistralai/audio.py | 20 ++ src/mistralai/extra/__init__.py | 48 ++++ src/mistralai/extra/exceptions.py | 53 +++- src/mistralai/extra/realtime/__init__.py | 25 ++ src/mistralai/extra/realtime/connection.py | 207 +++++++++++++ src/mistralai/extra/realtime/transcription.py | 271 ++++++++++++++++++ uv.lock | 74 ++++- 16 files changed, 1133 insertions(+), 31 deletions(-) create mode 100644 examples/mistral/audio/async_realtime_transcription_microphone.py create mode 100644 examples/mistral/audio/async_realtime_transcription_stream.py create mode 100644 examples/mistral/audio/transcription_diarize_async.py create mode 100644 src/mistralai/extra/realtime/__init__.py create mode 100644 src/mistralai/extra/realtime/connection.py create mode 100644 src/mistralai/extra/realtime/transcription.py diff --git a/examples/mistral/audio/async_realtime_transcription_microphone.py b/examples/mistral/audio/async_realtime_transcription_microphone.py new file mode 100644 index 00000000..748dbcaf --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_microphone.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai[realtime]", +# "pyaudio", +# "rich", +# ] +# [tool.uv.sources] +# mistralai = { path = "../../..", editable = true } +# /// + +import argparse +import asyncio +import os +import sys +from typing import AsyncIterator + +from rich.align import Align +from rich.console import Console +from rich.layout import Layout +from rich.live import Live +from rich.panel import Panel +from rich.text import Text + +from mistralai import Mistral +from mistralai.extra.realtime import UnknownRealtimeEvent +from mistralai.models import ( + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSessionCreated, + TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + +console = Console() + + +class TranscriptDisplay: + """Manages the live transcript display.""" + + def __init__(self, model: str) -> None: + self.model = model + self.transcript = "" + self.status = "🔌 Connecting..." + self.error: str | None = None + + def set_listening(self) -> None: + self.status = "🎤 Listening..." 
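+        # NOTE: render() styles the header by substring-matching the status
+        # text ("Listening", "Connecting", "Done"/"Stopped"), so keep these
+        # strings in sync with the checks there.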
+ + def add_text(self, text: str) -> None: + self.transcript += text + + def set_done(self) -> None: + self.status = "✅ Done" + + def set_error(self, error: str) -> None: + self.status = "❌ Error" + self.error = error + + def render(self) -> Layout: + layout = Layout() + + # Create minimal header + header_text = Text() + header_text.append("│ ", style="dim") + header_text.append(self.model, style="dim") + header_text.append(" │ ", style="dim") + + if "Listening" in self.status: + status_style = "green" + elif "Connecting" in self.status: + status_style = "yellow dim" + elif "Done" in self.status or "Stopped" in self.status: + status_style = "dim" + else: + status_style = "red" + header_text.append(self.status, style=status_style) + + header = Align.left(header_text, vertical="middle", pad=False) + + # Create main transcript area - no title, minimal border + transcript_text = Text( + self.transcript or "...", style="white" if self.transcript else "dim" + ) + transcript = Panel( + Align.left(transcript_text, vertical="top"), + border_style="dim", + padding=(1, 2), + ) + + # Minimal footer + footer_text = Text() + footer_text.append("ctrl+c", style="dim") + footer_text.append(" quit", style="dim italic") + footer = Align.left(footer_text, vertical="middle", pad=False) + + # Handle error display + if self.error: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout( + Panel(Text(self.error, style="red"), border_style="red"), + name="error", + size=4, + ), + Layout(footer, name="footer", size=1), + ) + else: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout(footer, name="footer", size=1), + ) + + return layout + + +async def iter_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, +) -> AsyncIterator[bytes]: + """ + Yield microphone PCM chunks using PyAudio (16-bit mono). + Encoding is always pcm_s16le. 
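+    Each chunk holds sample_rate * chunk_duration_ms / 1000 frames,
+    i.e. twice that many bytes at 16 bits per sample.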
+ """ + import pyaudio + + p = pyaudio.PyAudio() + chunk_samples = int(sample_rate * chunk_duration_ms / 1000) + + stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=sample_rate, + input=True, + frames_per_buffer=chunk_samples, + ) + + loop = asyncio.get_running_loop() + try: + while True: + # stream.read is blocking; run it off-thread + data = await loop.run_in_executor(None, stream.read, chunk_samples, False) + yield data + finally: + stream.stop_stream() + stream.close() + p.terminate() + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Real-time microphone transcription.") + parser.add_argument("--model", default="voxtral-mini-transcribe-realtime-2602", help="Model ID") + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + choices=[8000, 16000, 22050, 44100, 48000], + help="Sample rate in Hz", + ) + parser.add_argument( + "--chunk-duration", type=int, default=10, help="Chunk duration in ms" + ) + parser.add_argument( + "--api-key", default=os.environ.get("MISTRAL_API_KEY"), help="Mistral API key" + ) + parser.add_argument( + "--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "wss://api.mistral.ai"), + ) + return parser.parse_args() + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key, server_url=args.base_url) + + # microphone is always pcm_s16le here + audio_format = AudioFormat(encoding="pcm_s16le", sample_rate=args.sample_rate) + + mic_stream = iter_microphone( + sample_rate=args.sample_rate, chunk_duration_ms=args.chunk_duration + ) + + display = TranscriptDisplay(model=args.model) + + with Live( + display.render(), console=console, refresh_per_second=10, screen=True + ) as live: + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=mic_stream, + model=args.model, + audio_format=audio_format, + ): + if isinstance(event, RealtimeTranscriptionSessionCreated): + display.set_listening() + live.update(display.render()) + elif isinstance(event, TranscriptionStreamTextDelta): + display.add_text(event.text) + live.update(display.render()) + elif isinstance(event, TranscriptionStreamDone): + display.set_done() + live.update(display.render()) + break + elif isinstance(event, RealtimeTranscriptionError): + display.set_error(str(event.error)) + live.update(display.render()) + return 1 + elif isinstance(event, UnknownRealtimeEvent): + continue + except KeyboardInterrupt: + display.status = "⏹️ Stopped" + live.update(display.render()) + + return 0 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/async_realtime_transcription_stream.py b/examples/mistral/audio/async_realtime_transcription_stream.py new file mode 100644 index 00000000..6dbcd103 --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_stream.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python + +import argparse +import asyncio +import os +import subprocess +import sys +import tempfile +from pathlib import Path +from typing import AsyncIterator + +from mistralai import Mistral +from mistralai.extra.realtime.connection import UnknownRealtimeEvent +from mistralai.models import ( + AudioFormat, + RealtimeTranscriptionError, + TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + + +def convert_audio_to_pcm( + input_path: Path, +) -> Path: + temp_file = tempfile.NamedTemporaryFile(suffix=".pcm", delete=False) + temp_path = Path(temp_file.name) + temp_file.close() + + cmd 
= [ + "ffmpeg", + "-y", + "-i", + str(input_path), + "-f", + "s16le", + "-ar", + str(16000), + "-ac", + "1", + str(temp_path), + ] + + try: + subprocess.run(cmd, check=True, capture_output=True, text=True) + except subprocess.CalledProcessError as exc: + temp_path.unlink(missing_ok=True) + raise RuntimeError(f"ffmpeg conversion failed: {exc.stderr}") from exc + + return temp_path + + +async def aiter_audio_file( + path: Path, + *, + chunk_size: int = 4096, + chunk_delay: float = 0.0, +) -> AsyncIterator[bytes]: + with open(path, "rb") as f: + while True: + chunk = f.read(chunk_size) + if not chunk: + break + yield chunk + if chunk_delay > 0: + await asyncio.sleep(chunk_delay) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Real-time audio transcription via WebSocket (iterator-based)." + ) + parser.add_argument("file", type=Path, help="Path to the audio file") + parser.add_argument("--model", default="voxtral-mini-2601", help="Model ID") + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key", + ) + parser.add_argument( + "--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "https://api.mistral.ai"), + help="API base URL (http/https/ws/wss)", + ) + parser.add_argument( + "--chunk-size", type=int, default=4096, help="Audio chunk size in bytes" + ) + parser.add_argument( + "--chunk-delay", + type=float, + default=0.01, + help="Delay between chunks in seconds", + ) + parser.add_argument( + "--no-convert", + action="store_true", + help="Skip ffmpeg conversion (input must be raw PCM)", + ) + return parser.parse_args() + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + pcm_path = args.file + temp_path = None + + if not args.no_convert and args.file.suffix.lower() not in (".pcm", ".raw"): + pcm_path = convert_audio_to_pcm(args.file) + temp_path = pcm_path + + client = Mistral(api_key=api_key, server_url=args.base_url) + + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=aiter_audio_file( + pcm_path, + chunk_size=args.chunk_size, + chunk_delay=args.chunk_delay, + ), + model=args.model, + audio_format=AudioFormat(encoding="pcm_s16le", sample_rate=16000), + ): + if isinstance(event, TranscriptionStreamTextDelta): + print(event.text, end="", flush=True) + elif isinstance(event, TranscriptionStreamDone): + print() + break + elif isinstance(event, RealtimeTranscriptionError): + print(f"\nError: {event.error}", file=sys.stderr) + break + elif isinstance(event, UnknownRealtimeEvent): + # ignore future / unknown events; keep going + continue + + finally: + if temp_path is not None: + temp_path.unlink(missing_ok=True) + + return 0 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/chat_base64.py b/examples/mistral/audio/chat_base64.py index ea5ea79a..8468fbfb 100755 --- a/examples/mistral/audio/chat_base64.py +++ b/examples/mistral/audio/chat_base64.py @@ -6,7 +6,6 @@ from mistralai.models import UserMessage - def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "voxtral-small-latest" @@ -16,13 +15,17 @@ def main(): content = f.read() chat_response = client.chat.complete( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What's in this audio file?"}, - { - "type": "input_audio", - "input_audio": base64.b64encode(content).decode('utf-8'), - }, - ])], + messages=[ + UserMessage( + 
content=[ + {"type": "text", "text": "What's in this audio file?"}, + { + "type": "input_audio", + "input_audio": base64.b64encode(content).decode("utf-8"), + }, + ] + ) + ], ) print(chat_response.choices[0].message.content) diff --git a/examples/mistral/audio/chat_no_streaming.py b/examples/mistral/audio/chat_no_streaming.py index 2caebb25..f10240bd 100755 --- a/examples/mistral/audio/chat_no_streaming.py +++ b/examples/mistral/audio/chat_no_streaming.py @@ -6,7 +6,6 @@ from mistralai.models import UserMessage - def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "voxtral-small-latest" @@ -15,13 +14,17 @@ def main(): chat_response = client.chat.complete( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What is this audio about?"}, - { - "type": "input_audio", - "input_audio": "https://docs.mistral.ai/audio/bcn_weather.mp3", - }, - ])], + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What is this audio about?"}, + { + "type": "input_audio", + "input_audio": "https://docs.mistral.ai/audio/bcn_weather.mp3", + }, + ] + ) + ], ) print(chat_response.choices[0].message.content) diff --git a/examples/mistral/audio/chat_streaming.py b/examples/mistral/audio/chat_streaming.py index 060bfdd9..f9c913a0 100755 --- a/examples/mistral/audio/chat_streaming.py +++ b/examples/mistral/audio/chat_streaming.py @@ -6,26 +6,31 @@ from mistralai.models import UserMessage - def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "voxtral-small-latest" client = Mistral(api_key=api_key) with open("examples/fixtures/bcn_weather.mp3", "rb") as f: - file = client.files.upload(file=File(content=f, file_name=f.name), purpose="audio") + file = client.files.upload( + file=File(content=f, file_name=f.name), purpose="audio" + ) print(f"Uploaded audio file, id={file.id}") signed_url = client.files.get_signed_url(file_id=file.id) try: chat_response = client.chat.stream( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What is this audio about?"}, - { - "type": "input_audio", - "input_audio": signed_url.url, - }, - ])], + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What is this audio about?"}, + { + "type": "input_audio", + "input_audio": signed_url.url, + }, + ] + ) + ], ) for chunk in chat_response: print(chunk.data.choices[0].delta.content) @@ -33,5 +38,6 @@ def main(): client.files.delete(file_id=file.id) print(f"Deleted audio file, id={file.id}") + if __name__ == "__main__": main() diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py new file mode 100644 index 00000000..ef5323f4 --- /dev/null +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +import os +import asyncio +from mistralai import Mistral, File + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-2602" + + client = Mistral(api_key=api_key) + with open("examples/fixtures/bcn_weather.mp3", "rb") as f: + response = await client.audio.transcriptions.complete_async( + model=model, + file=File(content=f, file_name=f.name), + diarize=True, + timestamp_granularities=["segment"], + ) + for segment in response.segments: + speaker = segment.speaker_id or "unknown" + print( + f"[{segment.start:.1f}s → {segment.end:.1f}s] {speaker}: {segment.text.strip()}" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git 
a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py index 94a01c6f..e728b8fa 100644 --- a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -37,4 +37,3 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) - diff --git a/pyproject.toml b/pyproject.toml index 680ae19b..dbb5d44a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,6 +29,9 @@ agents = [ "griffe >=1.7.3,<2.0", "authlib >=1.5.2,<2.0", ] +realtime = [ + "websockets >=13.0", +] [project.urls] Repository = "https://github.com/mistralai/client-python.git" diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 106c10b2..5191033a 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -40,6 +40,9 @@ exclude_files=( "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" "examples/mistral/agents/async_conversation_run_mcp_remote.py" + "examples/mistral/audio/async_realtime_transcription_microphone.py" + "examples/mistral/audio/async_realtime_transcription_stream.py" + "examples/mistral/audio/transcription_diarize_async.py" ) # Check if the no-extra-dep flag is set diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py index 5687abdb..54430d49 100644 --- a/src/mistralai/audio.py +++ b/src/mistralai/audio.py @@ -1,5 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# region imports +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from mistralai.extra.realtime import RealtimeTranscription +# endregion imports + from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.transcriptions import Transcriptions @@ -21,3 +28,16 @@ def _init_sdks(self): self.transcriptions = Transcriptions( self.sdk_configuration, parent_ref=self.parent_ref ) + + # region sdk-class-body + @property + def realtime(self) -> "RealtimeTranscription": + """Returns a client for real-time audio transcription via WebSocket.""" + if not hasattr(self, "_realtime"): + from mistralai.extra.realtime import RealtimeTranscription + + self._realtime = RealtimeTranscription(self.sdk_configuration) + + return self._realtime + + # endregion sdk-class-body diff --git a/src/mistralai/extra/__init__.py b/src/mistralai/extra/__init__.py index d9a81d24..cabda728 100644 --- a/src/mistralai/extra/__init__.py +++ b/src/mistralai/extra/__init__.py @@ -1,3 +1,5 @@ +from typing import TYPE_CHECKING + from .struct_chat import ( ParsedChatCompletionResponse, convert_to_parsed_chat_completion_response, @@ -5,9 +7,55 @@ from .utils import response_format_from_pydantic_model from .utils.response_format import CustomPydanticModel +if TYPE_CHECKING: + from .realtime import ( + AudioEncoding, + AudioFormat, + RealtimeConnection, + RealtimeTranscriptionError, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + RealtimeTranscription, + UnknownRealtimeEvent, + ) + +_REALTIME_EXPORTS = { + "RealtimeTranscription", + "RealtimeConnection", + "AudioEncoding", + "AudioFormat", + "UnknownRealtimeEvent", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", 
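+    # Names above are resolved lazily by the module-level __getattr__ below
+    # (PEP 562), so `import mistralai.extra` succeeds even when the optional
+    # websockets dependency is not installed.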
+} + + +def __getattr__(name: str): + if name in _REALTIME_EXPORTS: + from . import realtime + + return getattr(realtime, name) + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + __all__ = [ "convert_to_parsed_chat_completion_response", "response_format_from_pydantic_model", "CustomPydanticModel", "ParsedChatCompletionResponse", + "RealtimeTranscription", + "RealtimeConnection", + "AudioEncoding", + "AudioFormat", + "UnknownRealtimeEvent", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", ] diff --git a/src/mistralai/extra/exceptions.py b/src/mistralai/extra/exceptions.py index 7853ddc2..ee107698 100644 --- a/src/mistralai/extra/exceptions.py +++ b/src/mistralai/extra/exceptions.py @@ -1,14 +1,59 @@ +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from mistralai.models import RealtimeTranscriptionError + + class MistralClientException(Exception): - """Base exception for all the client errors.""" + """Base exception for client errors.""" class RunException(MistralClientException): - """Exception raised for errors during a conversation run.""" + """Conversation run errors.""" class MCPException(MistralClientException): - """Exception raised for errors related to MCP operations.""" + """MCP operation errors.""" class MCPAuthException(MCPException): - """Exception raised for authentication errors with an MCP server.""" + """MCP authentication errors.""" + + +class RealtimeTranscriptionException(MistralClientException): + """Base realtime transcription exception.""" + + def __init__( + self, + message: str, + *, + code: Optional[int] = None, + payload: Optional[object] = None, + ) -> None: + super().__init__(message) + self.code = code + self.payload = payload + + +class RealtimeTranscriptionWSError(RealtimeTranscriptionException): + def __init__( + self, + message: str, + *, + payload: Optional["RealtimeTranscriptionError"] = None, + raw: Optional[object] = None, + ) -> None: + code: Optional[int] = None + if payload is not None: + try: + maybe_code = getattr(payload.error, "code", None) + if isinstance(maybe_code, int): + code = maybe_code + except Exception: + code = None + + super().__init__( + message, code=code, payload=payload if payload is not None else raw + ) + self.payload_typed = payload + self.payload_raw = raw diff --git a/src/mistralai/extra/realtime/__init__.py b/src/mistralai/extra/realtime/__init__.py new file mode 100644 index 00000000..85bf1d88 --- /dev/null +++ b/src/mistralai/extra/realtime/__init__.py @@ -0,0 +1,25 @@ +from mistralai.models import ( + AudioEncoding, + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, +) + +from .connection import UnknownRealtimeEvent, RealtimeConnection +from .transcription import RealtimeTranscription + +__all__ = [ + "AudioEncoding", + "AudioFormat", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionUpdated", + "RealtimeConnection", + "RealtimeTranscription", + "UnknownRealtimeEvent", +] diff --git a/src/mistralai/extra/realtime/connection.py b/src/mistralai/extra/realtime/connection.py new file mode 100644 index 00000000..042854ab --- /dev/null +++ b/src/mistralai/extra/realtime/connection.py @@ 
-0,0 +1,207 @@ +from __future__ import annotations + +import base64 +import json +from asyncio import CancelledError +from collections import deque +from typing import Any, AsyncIterator, Deque, Optional, Union + +from pydantic import ValidationError, BaseModel + +try: + from websockets.asyncio.client import ClientConnection # websockets >= 13.0 +except ImportError as exc: + raise ImportError( + "The `websockets` package (>=13.0) is required for real-time transcription. " + "Install with: pip install 'mistralai[realtime]'" + ) from exc + +from mistralai.models import ( + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + TranscriptionStreamDone, + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, +) + + +class UnknownRealtimeEvent(BaseModel): + """ + Forward-compat fallback event: + - unknown message type + - invalid JSON payload + - schema validation failure + """ + type: Optional[str] + content: Any + error: Optional[str] = None + + +RealtimeEvent = Union[ + # session lifecycle + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionUpdated, + # server errors + RealtimeTranscriptionError, + # transcription events + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, + TranscriptionStreamDone, + # forward-compat fallback + UnknownRealtimeEvent, +] + + +_MESSAGE_MODELS: dict[str, Any] = { + "session.created": RealtimeTranscriptionSessionCreated, + "session.updated": RealtimeTranscriptionSessionUpdated, + "error": RealtimeTranscriptionError, + "transcription.language": TranscriptionStreamLanguage, + "transcription.segment": TranscriptionStreamSegmentDelta, + "transcription.text.delta": TranscriptionStreamTextDelta, + "transcription.done": TranscriptionStreamDone, +} + + +def parse_realtime_event(payload: Any) -> RealtimeEvent: + """ + Tolerant parser: + - unknown event type -> UnknownRealtimeEvent + - validation failures -> UnknownRealtimeEvent (includes error string) + - invalid payload -> UnknownRealtimeEvent + """ + if not isinstance(payload, dict): + return UnknownRealtimeEvent( + type=None, content=payload, error="expected JSON object" + ) + + msg_type = payload.get("type") + if not isinstance(msg_type, str): + return UnknownRealtimeEvent( + type=None, content=payload, error="missing/invalid 'type'" + ) + + model_cls = _MESSAGE_MODELS.get(msg_type) + if model_cls is None: + return UnknownRealtimeEvent( + type=msg_type, content=payload, error="unknown event type" + ) + try: + parsed = model_cls.model_validate(payload) + return parsed + except ValidationError as exc: + return UnknownRealtimeEvent(type=msg_type, content=payload, error=str(exc)) + + +class RealtimeConnection: + def __init__( + self, + websocket: ClientConnection, + session: RealtimeTranscriptionSession, + *, + initial_events: Optional[list[RealtimeEvent]] = None, + ) -> None: + self._websocket = websocket + self._session = session + self._audio_format = session.audio_format + self._closed = False + self._initial_events: Deque[RealtimeEvent] = deque(initial_events or []) + + @property + def request_id(self) -> str: + return self._session.request_id + + @property + def session(self) -> RealtimeTranscriptionSession: + return self._session + + @property + def audio_format(self) -> AudioFormat: + return self._audio_format + + @property + def is_closed(self) -> bool: + return self._closed + + async def send_audio( + self, 
audio_bytes: Union[bytes, bytearray, memoryview] + ) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + + message = { + "type": "input_audio.append", + "audio": base64.b64encode(bytes(audio_bytes)).decode("ascii"), + } + await self._websocket.send(json.dumps(message)) + + async def update_session(self, audio_format: AudioFormat) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + + self._audio_format = audio_format + message = { + "type": "session.update", + "session": {"audio_format": audio_format.model_dump(mode="json")}, + } + await self._websocket.send(json.dumps(message)) + + async def end_audio(self) -> None: + if self._closed: + return + await self._websocket.send(json.dumps({"type": "input_audio.end"})) + + async def close(self, *, code: int = 1000, reason: str = "") -> None: + if self._closed: + return + self._closed = True + await self._websocket.close(code=code, reason=reason) + + async def __aenter__(self) -> "RealtimeConnection": + return self + + async def __aexit__(self, exc_type, exc, tb) -> None: + await self.close() + + def __aiter__(self) -> AsyncIterator[RealtimeEvent]: + return self.events() + + async def events(self) -> AsyncIterator[RealtimeEvent]: + # replay any handshake/prelude events (including session.created) + while self._initial_events: + ev = self._initial_events.popleft() + self._apply_session_updates(ev) + yield ev + + try: + async for msg in self._websocket: + text = ( + msg.decode("utf-8", errors="replace") + if isinstance(msg, (bytes, bytearray)) + else msg + ) + try: + data = json.loads(text) + except Exception as exc: + yield UnknownRealtimeEvent( + type=None, content=text, error=f"invalid JSON: {exc}" + ) + continue + + ev = parse_realtime_event(data) + self._apply_session_updates(ev) + yield ev + except CancelledError: + pass + finally: + await self.close() + + def _apply_session_updates(self, ev: RealtimeEvent) -> None: + if isinstance(ev, RealtimeTranscriptionSessionCreated) or isinstance(ev, RealtimeTranscriptionSessionUpdated): + self._session = ev.session + self._audio_format = ev.session.audio_format diff --git a/src/mistralai/extra/realtime/transcription.py b/src/mistralai/extra/realtime/transcription.py new file mode 100644 index 00000000..de117645 --- /dev/null +++ b/src/mistralai/extra/realtime/transcription.py @@ -0,0 +1,271 @@ +from __future__ import annotations + +import asyncio +import json +import time +from typing import AsyncIterator, Mapping, Optional +from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse + +try: + from websockets.asyncio.client import ( + ClientConnection, + connect, + ) # websockets >= 13.0 +except ImportError as exc: + raise ImportError( + "The `websockets` package (>=13.0) is required for real-time transcription. 
" + "Install with: pip install 'mistralai[realtime]'" + ) from exc + +from mistralai import models, utils +from mistralai.models import ( + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionCreated, +) +from mistralai.sdkconfiguration import SDKConfiguration +from mistralai.utils import generate_url, get_security, get_security_from_env + +from ..exceptions import RealtimeTranscriptionException, RealtimeTranscriptionWSError +from .connection import ( + RealtimeConnection, + RealtimeEvent, + UnknownRealtimeEvent, + parse_realtime_event, +) + + +class RealtimeTranscription: + """Client for realtime transcription over WebSocket (websockets >= 13.0).""" + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self._sdk_config = sdk_config + + def _build_url( + self, + model: str, + *, + server_url: Optional[str], + query_params: Mapping[str, str], + ) -> str: + if server_url is not None: + base_url = utils.remove_suffix(server_url, "/") + else: + base_url, _ = self._sdk_config.get_server_details() + + url = generate_url(base_url, "/v1/audio/transcriptions/realtime", None) + + parsed = urlparse(url) + merged = dict(parse_qsl(parsed.query, keep_blank_values=True)) + merged["model"] = model + merged.update(dict(query_params)) + + return urlunparse(parsed._replace(query=urlencode(merged))) + + async def connect( + self, + model: str, + audio_format: Optional[AudioFormat] = None, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RealtimeConnection: + if timeout_ms is None: + timeout_ms = self._sdk_config.timeout_ms + + security = self._sdk_config.security + if security is not None and callable(security): + security = security() + + resolved_security = get_security_from_env(security, models.Security) + + headers: dict[str, str] = {} + query_params: dict[str, str] = {} + + if resolved_security is not None: + security_headers, security_query = get_security(resolved_security) + headers |= security_headers + for key, values in security_query.items(): + if values: + query_params[key] = values[-1] + + if http_headers is not None: + headers |= dict(http_headers) + + url = self._build_url(model, server_url=server_url, query_params=query_params) + + parsed = urlparse(url) + if parsed.scheme == "https": + parsed = parsed._replace(scheme="wss") + elif parsed.scheme == "http": + parsed = parsed._replace(scheme="ws") + ws_url = urlunparse(parsed) + open_timeout = None if timeout_ms is None else timeout_ms / 1000.0 + user_agent = self._sdk_config.user_agent + + websocket: Optional[ClientConnection] = None + try: + websocket = await connect( + ws_url, + additional_headers=dict(headers), + open_timeout=open_timeout, + user_agent_header=user_agent, + ) + + session, initial_events = await _recv_handshake( + websocket, timeout_ms=timeout_ms + ) + connection = RealtimeConnection( + websocket=websocket, + session=session, + initial_events=initial_events, + ) + + if audio_format is not None: + await connection.update_session(audio_format) + + return connection + + except RealtimeTranscriptionException: + if websocket is not None: + await websocket.close() + raise + except Exception as exc: + if websocket is not None: + await websocket.close() + raise RealtimeTranscriptionException(f"Failed to connect: {exc}") from exc + + async def transcribe_stream( + self, + audio_stream: AsyncIterator[bytes], + model: str, + audio_format: Optional[AudioFormat] = None, + server_url: Optional[str] 
= None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncIterator[RealtimeEvent]: + """ + Flow + - opens connection + - streams audio in background + - yields events from the connection + """ + async with await self.connect( + model=model, + audio_format=audio_format, + server_url=server_url, + timeout_ms=timeout_ms, + http_headers=http_headers, + ) as connection: + + async def _send() -> None: + async for chunk in audio_stream: + if connection.is_closed: + break + await connection.send_audio(chunk) + await connection.end_audio() + + send_task = asyncio.create_task(_send()) + + try: + async for event in connection: + yield event + + # stop early (caller still sees the terminating event) + if isinstance(event, RealtimeTranscriptionError): + break + if getattr(event, "type", None) == "transcription.done": + break + finally: + send_task.cancel() + try: + await send_task + except asyncio.CancelledError: + pass + await connection.close() + + +def _extract_error_message(payload: dict) -> str: + err = payload.get("error") + if isinstance(err, dict): + msg = err.get("message") + if isinstance(msg, str): + return msg + if isinstance(msg, dict): + detail = msg.get("detail") + if isinstance(detail, str): + return detail + return "Realtime transcription error" + + +async def _recv_handshake( + websocket: ClientConnection, + *, + timeout_ms: Optional[int], +) -> tuple[RealtimeTranscriptionSession, list[RealtimeEvent]]: + """ + Read messages until session.created or error. + Replay all messages read during handshake as initial events (lossless). + """ + timeout_s = None if timeout_ms is None else timeout_ms / 1000.0 + deadline = None if timeout_s is None else (time.monotonic() + timeout_s) + + initial_events: list[RealtimeEvent] = [] + + def remaining() -> Optional[float]: + if deadline is None: + return None + return max(0.0, deadline - time.monotonic()) + + try: + while True: + raw = await asyncio.wait_for(websocket.recv(), timeout=remaining()) + text = ( + raw.decode("utf-8", errors="replace") + if isinstance(raw, (bytes, bytearray)) + else raw + ) + + try: + payload = json.loads(text) + except Exception as exc: + initial_events.append( + UnknownRealtimeEvent( + type=None, content=text, error=f"invalid JSON: {exc}" + ) + ) + continue + + msg_type = payload.get("type") if isinstance(payload, dict) else None + if msg_type == "error" and isinstance(payload, dict): + parsed = parse_realtime_event(payload) + initial_events.append(parsed) + if isinstance(parsed, RealtimeTranscriptionError): + raise RealtimeTranscriptionWSError( + _extract_error_message(payload), + payload=parsed, + raw=payload, + ) + raise RealtimeTranscriptionWSError( + _extract_error_message(payload), + payload=None, + raw=payload, + ) + + event = parse_realtime_event(payload) + initial_events.append(event) + + if isinstance(event, RealtimeTranscriptionSessionCreated): + return event.session, initial_events + + except asyncio.TimeoutError as exc: + raise RealtimeTranscriptionException( + "Timeout waiting for session creation." 
+ ) from exc + except RealtimeTranscriptionException: + raise + except Exception as exc: + raise RealtimeTranscriptionException( + f"Unexpected websocket handshake failure: {exc}" + ) from exc diff --git a/uv.lock b/uv.lock index efffa7ad..85e04bd9 100644 --- a/uv.lock +++ b/uv.lock @@ -589,6 +589,9 @@ gcp = [ { name = "google-auth" }, { name = "requests" }, ] +realtime = [ + { name = "websockets" }, +] [package.dev-dependencies] dev = [ @@ -627,8 +630,9 @@ requires-dist = [ { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, { name = "requests", marker = "extra == 'gcp'", specifier = ">=2.32.3" }, { name = "typing-inspection", specifier = ">=0.4.0" }, + { name = "websockets", marker = "extra == 'realtime'", specifier = ">=13.0" }, ] -provides-extras = ["gcp", "agents"] +provides-extras = ["gcp", "agents", "realtime"] [package.metadata.requires-dev] dev = [ @@ -1562,6 +1566,74 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = "2025-12-21T14:16:21.041Z" }, ] +[[package]] +name = "websockets" +version = "16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/74/221f58decd852f4b59cc3354cccaf87e8ef695fede361d03dc9a7396573b/websockets-16.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:04cdd5d2d1dacbad0a7bf36ccbcd3ccd5a30ee188f2560b7a62a30d14107b31a", size = 177343, upload-time = "2026-01-10T09:22:21.28Z" }, + { url = "https://files.pythonhosted.org/packages/19/0f/22ef6107ee52ab7f0b710d55d36f5a5d3ef19e8a205541a6d7ffa7994e5a/websockets-16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8ff32bb86522a9e5e31439a58addbb0166f0204d64066fb955265c4e214160f0", size = 175021, upload-time = "2026-01-10T09:22:22.696Z" }, + { url = "https://files.pythonhosted.org/packages/10/40/904a4cb30d9b61c0e278899bf36342e9b0208eb3c470324a9ecbaac2a30f/websockets-16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:583b7c42688636f930688d712885cf1531326ee05effd982028212ccc13e5957", size = 175320, upload-time = "2026-01-10T09:22:23.94Z" }, + { url = "https://files.pythonhosted.org/packages/9d/2f/4b3ca7e106bc608744b1cdae041e005e446124bebb037b18799c2d356864/websockets-16.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7d837379b647c0c4c2355c2499723f82f1635fd2c26510e1f587d89bc2199e72", size = 183815, upload-time = "2026-01-10T09:22:25.469Z" }, + { url = "https://files.pythonhosted.org/packages/86/26/d40eaa2a46d4302becec8d15b0fc5e45bdde05191e7628405a19cf491ccd/websockets-16.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df57afc692e517a85e65b72e165356ed1df12386ecb879ad5693be08fac65dde", size = 185054, 
upload-time = "2026-01-10T09:22:27.101Z" }, + { url = "https://files.pythonhosted.org/packages/b0/ba/6500a0efc94f7373ee8fefa8c271acdfd4dca8bd49a90d4be7ccabfc397e/websockets-16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2b9f1e0d69bc60a4a87349d50c09a037a2607918746f07de04df9e43252c77a3", size = 184565, upload-time = "2026-01-10T09:22:28.293Z" }, + { url = "https://files.pythonhosted.org/packages/04/b4/96bf2cee7c8d8102389374a2616200574f5f01128d1082f44102140344cc/websockets-16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:335c23addf3d5e6a8633f9f8eda77efad001671e80b95c491dd0924587ece0b3", size = 183848, upload-time = "2026-01-10T09:22:30.394Z" }, + { url = "https://files.pythonhosted.org/packages/02/8e/81f40fb00fd125357814e8c3025738fc4ffc3da4b6b4a4472a82ba304b41/websockets-16.0-cp310-cp310-win32.whl", hash = "sha256:37b31c1623c6605e4c00d466c9d633f9b812ea430c11c8a278774a1fde1acfa9", size = 178249, upload-time = "2026-01-10T09:22:32.083Z" }, + { url = "https://files.pythonhosted.org/packages/b4/5f/7e40efe8df57db9b91c88a43690ac66f7b7aa73a11aa6a66b927e44f26fa/websockets-16.0-cp310-cp310-win_amd64.whl", hash = "sha256:8e1dab317b6e77424356e11e99a432b7cb2f3ec8c5ab4dabbcee6add48f72b35", size = 178685, upload-time = "2026-01-10T09:22:33.345Z" }, + { url = "https://files.pythonhosted.org/packages/f2/db/de907251b4ff46ae804ad0409809504153b3f30984daf82a1d84a9875830/websockets-16.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:31a52addea25187bde0797a97d6fc3d2f92b6f72a9370792d65a6e84615ac8a8", size = 177340, upload-time = "2026-01-10T09:22:34.539Z" }, + { url = "https://files.pythonhosted.org/packages/f3/fa/abe89019d8d8815c8781e90d697dec52523fb8ebe308bf11664e8de1877e/websockets-16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:417b28978cdccab24f46400586d128366313e8a96312e4b9362a4af504f3bbad", size = 175022, upload-time = "2026-01-10T09:22:36.332Z" }, + { url = "https://files.pythonhosted.org/packages/58/5d/88ea17ed1ded2079358b40d31d48abe90a73c9e5819dbcde1606e991e2ad/websockets-16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af80d74d4edfa3cb9ed973a0a5ba2b2a549371f8a741e0800cb07becdd20f23d", size = 175319, upload-time = "2026-01-10T09:22:37.602Z" }, + { url = "https://files.pythonhosted.org/packages/d2/ae/0ee92b33087a33632f37a635e11e1d99d429d3d323329675a6022312aac2/websockets-16.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:08d7af67b64d29823fed316505a89b86705f2b7981c07848fb5e3ea3020c1abe", size = 184631, upload-time = "2026-01-10T09:22:38.789Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c5/27178df583b6c5b31b29f526ba2da5e2f864ecc79c99dae630a85d68c304/websockets-16.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be95cfb0a4dae143eaed2bcba8ac23f4892d8971311f1b06f3c6b78952ee70b", size = 185870, upload-time = "2026-01-10T09:22:39.893Z" }, + { url = "https://files.pythonhosted.org/packages/87/05/536652aa84ddc1c018dbb7e2c4cbcd0db884580bf8e95aece7593fde526f/websockets-16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = 
"sha256:d6297ce39ce5c2e6feb13c1a996a2ded3b6832155fcfc920265c76f24c7cceb5", size = 185361, upload-time = "2026-01-10T09:22:41.016Z" }, + { url = "https://files.pythonhosted.org/packages/6d/e2/d5332c90da12b1e01f06fb1b85c50cfc489783076547415bf9f0a659ec19/websockets-16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c1b30e4f497b0b354057f3467f56244c603a79c0d1dafce1d16c283c25f6e64", size = 184615, upload-time = "2026-01-10T09:22:42.442Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/d3f9576691cae9253b51555f841bc6600bf0a983a461c79500ace5a5b364/websockets-16.0-cp311-cp311-win32.whl", hash = "sha256:5f451484aeb5cafee1ccf789b1b66f535409d038c56966d6101740c1614b86c6", size = 178246, upload-time = "2026-01-10T09:22:43.654Z" }, + { url = "https://files.pythonhosted.org/packages/54/67/eaff76b3dbaf18dcddabc3b8c1dba50b483761cccff67793897945b37408/websockets-16.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7f0659570eefb578dacde98e24fb60af35350193e4f56e11190787bee77dac", size = 178684, upload-time = "2026-01-10T09:22:44.941Z" }, + { url = "https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365, upload-time = "2026-01-10T09:22:46.787Z" }, + { url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038, upload-time = "2026-01-10T09:22:47.999Z" }, + { url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328, upload-time = "2026-01-10T09:22:49.809Z" }, + { url = "https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915, upload-time = "2026-01-10T09:22:51.071Z" }, + { url = "https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152, upload-time = "2026-01-10T09:22:52.224Z" }, + { url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583, upload-time = "2026-01-10T09:22:53.443Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880, upload-time = "2026-01-10T09:22:55.033Z" }, + { url = "https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261, upload-time = "2026-01-10T09:22:56.251Z" }, + { url = "https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693, upload-time = "2026-01-10T09:22:57.478Z" }, + { url = "https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364, upload-time = "2026-01-10T09:22:59.333Z" }, + { url = "https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039, upload-time = "2026-01-10T09:23:01.171Z" }, + { url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323, upload-time = "2026-01-10T09:23:02.341Z" }, + { url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975, upload-time = "2026-01-10T09:23:03.756Z" }, + { url = "https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203, upload-time = "2026-01-10T09:23:05.01Z" }, + { url = "https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653, upload-time = "2026-01-10T09:23:06.301Z" }, + { url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920, upload-time = "2026-01-10T09:23:07.492Z" }, + { url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255, upload-time = "2026-01-10T09:23:09.245Z" }, + { url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689, upload-time = "2026-01-10T09:23:10.483Z" }, + { url = "https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" }, + { url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" }, + { url = "https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" }, + { url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" }, + { url = "https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = "2026-01-10T09:23:19.652Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" }, + { url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" }, + { url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" }, + { url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" }, + { url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" }, + { url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, upload-time = "2026-01-10T09:23:29.449Z" }, + { url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" }, + { url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = "2026-01-10T09:23:32.627Z" }, + { url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", 
size = 178085, upload-time = "2026-01-10T09:23:33.816Z" }, + { url = "https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" }, + { url = "https://files.pythonhosted.org/packages/72/07/c98a68571dcf256e74f1f816b8cc5eae6eb2d3d5cfa44d37f801619d9166/websockets-16.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:349f83cd6c9a415428ee1005cadb5c2c56f4389bc06a9af16103c3bc3dcc8b7d", size = 174947, upload-time = "2026-01-10T09:23:36.166Z" }, + { url = "https://files.pythonhosted.org/packages/7e/52/93e166a81e0305b33fe416338be92ae863563fe7bce446b0f687b9df5aea/websockets-16.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:4a1aba3340a8dca8db6eb5a7986157f52eb9e436b74813764241981ca4888f03", size = 175260, upload-time = "2026-01-10T09:23:37.409Z" }, + { url = "https://files.pythonhosted.org/packages/56/0c/2dbf513bafd24889d33de2ff0368190a0e69f37bcfa19009ef819fe4d507/websockets-16.0-pp311-pypy311_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f4a32d1bd841d4bcbffdcb3d2ce50c09c3909fbead375ab28d0181af89fd04da", size = 176071, upload-time = "2026-01-10T09:23:39.158Z" }, + { url = "https://files.pythonhosted.org/packages/a5/8f/aea9c71cc92bf9b6cc0f7f70df8f0b420636b6c96ef4feee1e16f80f75dd/websockets-16.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0298d07ee155e2e9fda5be8a9042200dd2e3bb0b8a38482156576f863a9d457c", size = 176968, upload-time = "2026-01-10T09:23:41.031Z" }, + { url = "https://files.pythonhosted.org/packages/9a/3f/f70e03f40ffc9a30d817eef7da1be72ee4956ba8d7255c399a01b135902a/websockets-16.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a653aea902e0324b52f1613332ddf50b00c06fdaf7e92624fbf8c77c78fa5767", size = 178735, upload-time = "2026-01-10T09:23:42.259Z" }, + { url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" }, +] + [[package]] name = "zipp" version = "3.23.0" From 102be7d9675189743d1806bc7ccc0965d47f8faa Mon Sep 17 00:00:00 2001 From: "alexandre.abouchahine" Date: Tue, 3 Feb 2026 15:40:36 +0100 Subject: [PATCH 03/42] add new audio params --- .speakeasy/gen.lock | 79 ++++++++----------- .speakeasy/workflow.lock | 11 ++- docs/models/audiotranscriptionrequest.md | 2 + .../models/audiotranscriptionrequeststream.md | 2 + docs/models/timestampgranularity.md | 3 +- docs/models/transcriptionsegmentchunk.md | 2 + .../models/transcriptionstreamsegmentdelta.md | 1 + docs/sdks/transcriptions/README.md | 8 +- .../audio/transcription_diarize_async.py | 4 +- pyproject.toml | 2 + scripts/run_examples.sh | 3 +- src/mistralai/audio.py | 14 ++-- .../models/audiotranscriptionrequest.py | 8 ++ .../models/audiotranscriptionrequeststream.py | 8 ++ src/mistralai/models/timestampgranularity.py | 5 +- 
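A minimal usage sketch for the new audio parameters this patch introduces (diarize, context_bias, and the "word" timestamp granularity); the model name and file ID below are placeholders, not values taken from the diff:

import os
from mistralai import Mistral

with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as mistral:
    res = mistral.audio.transcriptions.complete(
        model="voxtral-mini-latest",  # placeholder model name
        file_id="<your-file-id>",  # ID of a file uploaded to /v1/files
        diarize=True,  # new: attaches a speaker_id to each segment
        context_bias=["Voxtral", "Mistral"],  # new: bias decoding toward domain terms
        timestamp_granularities=["segment", "word"],  # "word" is newly accepted
    )
    for segment in res.segments or []:
        print(segment.speaker_id, segment.start, segment.end, segment.text)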
.../models/transcriptionsegmentchunk.py | 43 +++++++++- .../models/transcriptionstreamsegmentdelta.py | 40 +++++++++- src/mistralai/transcriptions.py | 24 ++++++ uv.lock | 2 + 19 files changed, 190 insertions(+), 71 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 9d51b30a..f6c0f0a2 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,7 +1,7 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: cc385dce976ac06e6d062e992f0ee380 + docChecksum: e4b3b07fe28f4666261325e923d6c5d9 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 @@ -11,9 +11,9 @@ management: installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 8b0735b6-5924-48f1-ade2-47cb374c76bc - pristine_commit_hash: a9971b936f50486e2e4ceef95d0b2c4708633219 - pristine_tree_hash: 51b8a57de0bf62da607fe0023eec1124458ebee9 + generation_id: 00cab5ea-60fa-456d-ad3f-1ae32427d619 + pristine_commit_hash: b6e4b5c0cd6a42df18b2e7aa44ac696d48576d06 + pristine_tree_hash: b358b046bcef8a5f9b8898d98a4d9fbf82b52e6e features: python: additionalDependencies: 1.0.0 @@ -293,12 +293,12 @@ trackedFiles: pristine_git_object: d174ab9959cadde659f76db94ed87c743e0f6783 docs/models/audiotranscriptionrequest.md: id: ebf59641bc84 - last_write_checksum: sha1:b76d6e7ee3f1a0ca96e1064db61896e287027711 - pristine_git_object: f2e17dd35eda24a48b0c105ecce63a73d754e051 + last_write_checksum: sha1:c55c97a06726812323a031897beffbb160021c05 + pristine_git_object: d7f5bd51b1289f0eb481d86a71bb483ee50bbc40 docs/models/audiotranscriptionrequeststream.md: id: 79b5f721b753 - last_write_checksum: sha1:e8fc60f874bb7e8ee03c4e05bdf88b2db1afbfaf - pristine_git_object: 975e437a299efb27c069812f424a0107999de640 + last_write_checksum: sha1:df6825c05b5a02dcf904ebaa40fb97e9186248cc + pristine_git_object: 5d64964d1a635da912f2553c306fb8654ebfca2e docs/models/basemodelcard.md: id: 2f62bfbd650e last_write_checksum: sha1:7ee94bd9ceb6af84024863aa8183540bee7ffcce @@ -1533,8 +1533,8 @@ trackedFiles: pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f docs/models/timestampgranularity.md: id: eb4d5a8e6f08 - last_write_checksum: sha1:c2134d9f1f96d4eef48cedfe2b93eb061d5ea47f - pristine_git_object: 0d2a8054590463a167f69c36c00b8f2fc3c7906d + last_write_checksum: sha1:e256a5e8c6010d500841295b89d88d0eface3b88 + pristine_git_object: d20012ea9925446c16c9162304642ba48391d34d docs/models/tool.md: id: 8966139dbeed last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a @@ -1653,8 +1653,8 @@ trackedFiles: pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 docs/models/transcriptionsegmentchunk.md: id: f09db8b2273e - last_write_checksum: sha1:c94ef1aa3dc2568ec77d186fa9061598f0ebccf1 - pristine_git_object: bebc9f72f521cf9cbd1818d53239cd632a025a31 + last_write_checksum: sha1:b89ee132a3c63e56806f3f395c98a9e7e5e9c7d0 + pristine_git_object: f620b96a75a0b9c6e015ae1f460dcccb80d113ee docs/models/transcriptionstreamdone.md: id: 2253923d93cf last_write_checksum: sha1:043ebcd284007f8c8536f2726ec5f525abffeb6b @@ -1685,8 +1685,8 @@ trackedFiles: pristine_git_object: e93521e10d43299676f44c8297608cc94c6106e6 docs/models/transcriptionstreamsegmentdelta.md: id: f59c3fb696f2 - last_write_checksum: sha1:d44b6c1359c0ed504f97edb46b3acf0145967fe7 - pristine_git_object: 3deeedf067c833cae8df1ab366a2e54b3f9e9186 + last_write_checksum: sha1:4d03e881a4ad9c3bed6075bb8e25d00af391652c + pristine_git_object: 
2ab32f9783f6645bba7603279c03db4465c70fff docs/models/transcriptionstreamsegmentdeltatype.md: id: 03ee222a3afd last_write_checksum: sha1:d02b5f92cf2d8182aeaa8dd3428b988ab4fc0fad @@ -1841,8 +1841,8 @@ trackedFiles: pristine_git_object: efcb99314c7d07a3dc556c297333046fc5d9e097 docs/sdks/transcriptions/README.md: id: 089cf94ecf47 - last_write_checksum: sha1:fdf785e4cbab20aec41122735435a38f582f7f29 - pristine_git_object: 3243258c4debd94e10c98c2b18dcc47838143a5b + last_write_checksum: sha1:01e68371b7a94cb35d6435efd3ef9247e8c27a94 + pristine_git_object: dabab00e85a3f480c8dc3dd7b792e68420ae08b6 py.typed: id: 258c3ed47ae4 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -1881,8 +1881,8 @@ trackedFiles: pristine_git_object: 73e4ee3c885f7c3472a9dc5c0546c02d4e19a1c4 src/mistralai/audio.py: id: c398f6a11e24 - last_write_checksum: sha1:11f9713b4f970509cffe0e6122c61f9aeafc9e73 - pristine_git_object: 5687abdb5676903661a33a3bee115f289f5fe9df + last_write_checksum: sha1:aa75fa00e00d8059121d8de60844d70d50203661 + pristine_git_object: 3de29053f34654907c423ca6600f216f6b0dcbe0 src/mistralai/basesdk.py: id: 3127264590ce last_write_checksum: sha1:5340f1c5976fd87d3b17b285535b63bbbe7db120 @@ -2089,12 +2089,12 @@ trackedFiles: pristine_git_object: 48ab648c3525fcc9fe1c722b7beee0f649e30e7a src/mistralai/models/audiotranscriptionrequest.py: id: 4c6a6fee484a - last_write_checksum: sha1:d8fb192581056b4ae053f9e6919874850462cb03 - pristine_git_object: 308e2599f4ba8878b0fc20ee2660289b55ae7c9a + last_write_checksum: sha1:8dd41335ffd46dd1099bdb20baac32d043c5936c + pristine_git_object: 86417b4235292de3ab1d2b46116ce0ba94010087 src/mistralai/models/audiotranscriptionrequeststream.py: id: 863eca721e72 - last_write_checksum: sha1:a7ec74e5e05a705f2d61d1fe8a635178bcea3cd6 - pristine_git_object: 04374503f931f3964851d09def70535276bdf194 + last_write_checksum: sha1:010618236f3da1c99d63d334266622cf84e6b09f + pristine_git_object: 1f4087e8d33c8a3560d5ce58f2a1a7bc4627556b src/mistralai/models/basemodelcard.py: id: 5554644ee6f2 last_write_checksum: sha1:aa5af32cda04d45bcdf2c8fb380529c4fbc828aa @@ -2869,8 +2869,8 @@ trackedFiles: pristine_git_object: 627ae4883698696774b7a285a73326a4509c6828 src/mistralai/models/timestampgranularity.py: id: e0cb6c4efa2a - last_write_checksum: sha1:2b554048013632407c391444d972e29362751468 - pristine_git_object: 02816df67dd326a17d27dc815c49c6e1172693b8 + last_write_checksum: sha1:68ea11a4e27f23b2fcc976d0a8eeb95f6f28ba85 + pristine_git_object: 5bda890f500228ba0c3dc234edf09906b88cb522 src/mistralai/models/tool.py: id: c0a9b60b6cf1 last_write_checksum: sha1:805030012b6cf4d6159c1515b44e1c999ea2349a @@ -2929,8 +2929,8 @@ trackedFiles: pristine_git_object: 54a98a5ba7b83a6b7f6a39046b400a61e9889898 src/mistralai/models/transcriptionsegmentchunk.py: id: ccd6d5675b49 - last_write_checksum: sha1:367abd8a8182d9db9f2b19540aed2b974ad7bbe2 - pristine_git_object: aa30f053a624b25c7fd1739c05f406a81873ff60 + last_write_checksum: sha1:01b1c1c52a1e324c8f874586cdd0349fed35443c + pristine_git_object: 40ad20b3abc2f0b2c0d2d695ba89237f66cc0b2b src/mistralai/models/transcriptionstreamdone.py: id: 42177659bf0f last_write_checksum: sha1:5fda2b766b2af41749006835e45c95f708eddb28 @@ -2949,8 +2949,8 @@ trackedFiles: pristine_git_object: 15b7514415e536bb04fd1a69ccea20615b5b1fcf src/mistralai/models/transcriptionstreamsegmentdelta.py: id: 83d02b065099 - last_write_checksum: sha1:1f48714d450fff004f9cf24b81749848240fe722 - pristine_git_object: d779ed837913c8c13a4599a06a2ed75afa827a48 + last_write_checksum: 
sha1:3f70d4d58d8fedb784d056425662e7dc2f9ed244 + pristine_git_object: 550c83e7073bc99fdac6a0d59c5c30daa9d35f43 src/mistralai/models/transcriptionstreamtextdelta.py: id: ce0861d8affd last_write_checksum: sha1:84a3b6c6d84a896e59e2874de59d812d3db657a5 @@ -3017,8 +3017,8 @@ trackedFiles: pristine_git_object: 7e77925ddc9aa01c4e5f8ba2ca52aa7f32e89859 src/mistralai/transcriptions.py: id: ba6b040274f2 - last_write_checksum: sha1:079bcd1c4a6b1d74e97cc6d77bccf4eea1232cd7 - pristine_git_object: bdbeb1ccbb938c825e5c3371a0f761a90a6e17b8 + last_write_checksum: sha1:0cd336f14cccb581ff955feaf8bc6f7df185f27b + pristine_git_object: 90f2e58a3677e922cb5c8aac4b30d5e697ef2f05 src/mistralai/types/__init__.py: id: b89b8375c971 last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed @@ -3824,14 +3824,14 @@ examples: application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} userExample: requestBody: - multipart/form-data: {"model": "voxtral-mini-latest", "stream": false} + multipart/form-data: {"model": "voxtral-mini-latest", "stream": false, "diarize": false} responses: "200": application/json: {"model": "voxtral-mini-2507", "text": "This week, I traveled to Chicago to deliver my final farewell address to the nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms, in schools, at farms and on factory floors, at diners and on distant military outposts, All these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better President, and you made me a better man.\nOver the course of these eight years, I've seen the goodness, the resilience, and the hope of the American people. I've seen neighbors looking out for each other as we rescued our economy from the worst crisis of our lifetimes. I've hugged cancer survivors who finally know the security of affordable health care. I've seen communities like Joplin rebuild from disaster, and cities like Boston show the world that no terrorist will ever break the American spirit. I've seen the hopeful faces of young graduates and our newest military officers. I've mourned with grieving families searching for answers. And I found grace in a Charleston church. I've seen our scientists help a paralyzed man regain his sense of touch, and our wounded warriors walk again. I've seen our doctors and volunteers rebuild after earthquakes and stop pandemics in their tracks. I've learned from students who are building robots and curing diseases, and who will change the world in ways we can't even imagine. I've seen the youngest of children remind us of our obligations to care for our refugees, to work in peace, and above all, to look out for each other.\nThat's what's possible when we come together in the slow, hard, sometimes frustrating, but always vital work of self-government. But we can't take our democracy for granted. All of us, regardless of party, should throw ourselves into the work of citizenship. Not just when there is an election. Not just when our own narrow interest is at stake. But over the full span of a lifetime. If you're tired of arguing with strangers on the Internet, try to talk with one in real life. If something needs fixing, lace up your shoes and do some organizing. 
If you're disappointed by your elected officials, then grab a clipboard, get some signatures, and run for office yourself.\nOur success depends on our participation, regardless of which way the pendulum of power swings. It falls on each of us to be guardians of our democracy, to embrace the joyous task we've been given to continually try to improve this great nation of ours. Because for all our outward differences, we all share the same proud title – citizen.\nIt has been the honor of my life to serve you as President. Eight years later, I am even more optimistic about our country's promise. And I look forward to working along your side as a citizen for all my days that remain.\nThanks, everybody. God bless you. And God bless the United States of America.\n", "segments": [], "usage": {"prompt_tokens": 4, "completion_tokens": 635, "total_tokens": 3264, "prompt_audio_seconds": 203}, "language": "en"} audio_api_v1_transcriptions_post_stream: speakeasy-default-audio-api-v1-transcriptions-post-stream: requestBody: - multipart/form-data: {"model": "Camry", "stream": true} + multipart/form-data: {"model": "Camry", "stream": true, "diarize": false} agents_api_v1_conversations_delete: speakeasy-default-agents-api-v1-conversations-delete: parameters: @@ -3897,21 +3897,6 @@ examples: application/json: {} examplesVersion: 1.0.2 generatedTests: {} -releaseNotes: | - ## Python SDK Changes: - * `mistral.beta.conversations.restart_stream()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.start()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.list()`: `response.[].[agent_conversation].agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.get()`: `response.[agent_conversation].agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.restart()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.start_stream()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.agents.get()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.agents.get_version()`: `request.version` **Changed** **Breaking** :warning: - * `mistral.beta.agents.list_version_aliases()`: **Added** - * `mistral.models.list()`: `response.data.[].[fine-tuned].capabilities.audio_transcription` **Added** - * `mistral.models.retrieve()`: `response.[base].capabilities.audio_transcription` **Added** - * `mistral.beta.agents.create_version_alias()`: **Added** - * `mistral.files.list()`: `request.mimetypes` **Added** generatedFiles: - .gitattributes - .vscode/settings.json diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 3bb067a0..bb904c64 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:56bcbb02148ddfabd64cb2dced1a9efe0f00d0fa106435fdd0fb2a889c1c6fed - sourceBlobDigest: sha256:c014d9220f14e04b573acf291c173954b8d34d03d852877a91756afb68ccc65b + sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 + sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 tags: - latest - - speakeasy-sdk-regen-1769979831 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: 
sha256:56bcbb02148ddfabd64cb2dced1a9efe0f00d0fa106435fdd0fb2a889c1c6fed - sourceBlobDigest: sha256:c014d9220f14e04b573acf291c173954b8d34d03d852877a91756afb68ccc65b + sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 + sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:feb7bf2f6fab8456316453c7e14eda6201fe8649fe0ffcdb1eaa4580cc66a51e + codeSamplesRevisionDigest: sha256:8fa56ecd9dd6e5f831fb96c4cfd00c65f617a03ff67f876d75ecdf28cb5bbf3c workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md index f2e17dd3..d7f5bd51 100644 --- a/docs/models/audiotranscriptionrequest.md +++ b/docs/models/audiotranscriptionrequest.md @@ -12,4 +12,6 @@ | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | | `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | | `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequeststream.md b/docs/models/audiotranscriptionrequeststream.md index 975e437a..5d64964d 100644 --- a/docs/models/audiotranscriptionrequeststream.md +++ b/docs/models/audiotranscriptionrequeststream.md @@ -12,4 +12,6 @@ | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `stream` | *Optional[Literal[True]]* | :heavy_minus_sign: | N/A | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. 
| \ No newline at end of file diff --git a/docs/models/timestampgranularity.md b/docs/models/timestampgranularity.md index 0d2a8054..d20012ea 100644 --- a/docs/models/timestampgranularity.md +++ b/docs/models/timestampgranularity.md @@ -5,4 +5,5 @@ | Name | Value | | --------- | --------- | -| `SEGMENT` | segment | \ No newline at end of file +| `SEGMENT` | segment | +| `WORD` | word | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md index bebc9f72..f620b96a 100644 --- a/docs/models/transcriptionsegmentchunk.md +++ b/docs/models/transcriptionsegmentchunk.md @@ -8,5 +8,7 @@ | `text` | *str* | :heavy_check_mark: | N/A | | `start` | *float* | :heavy_check_mark: | N/A | | `end` | *float* | :heavy_check_mark: | N/A | +| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | | `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md index 3deeedf0..2ab32f97 100644 --- a/docs/models/transcriptionstreamsegmentdelta.md +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -8,5 +8,6 @@ | `text` | *str* | :heavy_check_mark: | N/A | | `start` | *float* | :heavy_check_mark: | N/A | | `end` | *float* | :heavy_check_mark: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.TranscriptionStreamSegmentDeltaType]](../models/transcriptionstreamsegmentdeltatype.md) | :heavy_minus_sign: | N/A | | `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index 3243258c..dabab00e 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.audio.transcriptions.complete(model="Model X") + res = mistral.audio.transcriptions.complete(model="Model X", diarize=False) # Handle response print(res) @@ -42,6 +42,8 @@ with Mistral( | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | | `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | @@ -71,7 +73,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.audio.transcriptions.stream(model="Camry") + res = mistral.audio.transcriptions.stream(model="Camry", diarize=False) with res as event_stream: for event in event_stream: @@ -90,6 +92,8 @@ with Mistral( | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py index ef5323f4..26754837 100644 --- a/examples/mistral/audio/transcription_diarize_async.py +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -2,15 +2,17 @@ import os import asyncio +import pathlib from mistralai import Mistral, File +fixture_dir = pathlib.Path(__file__).parents[2] / "fixtures" async def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "voxtral-mini-2602" client = Mistral(api_key=api_key) - with open("examples/fixtures/bcn_weather.mp3", "rb") as f: + with open(fixture_dir / "bcn_weather.mp3", "rb") as f: response = await client.audio.transcriptions.complete_async( model=model, file=File(content=f, file_name=f.name), diff --git a/pyproject.toml b/pyproject.toml index dbb5d44a..ef338022 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,6 +48,7 @@ dev = [ "mcp>=1.0,<2", "griffe>=1.7.3,<2", "authlib>=1.5.2,<2", + "websockets >=13.0", ] lint = [ "ruff>=0.11.10,<0.12", @@ -106,6 +107,7 @@ module = [ "jsonpath.*", "typing_inspect.*", "authlib.*", + "websockets.*", "mcp.*", "griffe.*" ] diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 5191033a..40c6d17e 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -39,10 +39,9 @@ exclude_files=( "examples/mistral/mcp_servers/stdio_server.py" "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" - "examples/mistral/agents/async_conversation_run_mcp_remote.py" + "examples/mistral/agents/async_conversation_run_mcp_remote.py" "examples/mistral/audio/async_realtime_transcription_microphone.py" "examples/mistral/audio/async_realtime_transcription_stream.py" - "examples/mistral/audio/transcription_diarize_async.py" ) # Check if the no-extra-dep flag is set diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py index 54430d49..3de29053 100644 --- a/src/mistralai/audio.py +++ b/src/mistralai/audio.py @@ -1,5 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.transcriptions import Transcriptions +from typing import Optional + # region imports from typing import TYPE_CHECKING @@ -7,11 +12,6 @@ from mistralai.extra.realtime import RealtimeTranscription # endregion imports -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.transcriptions import Transcriptions -from typing import Optional - class Audio(BaseSDK): transcriptions: Transcriptions @@ -34,9 +34,9 @@ def _init_sdks(self): def realtime(self) -> "RealtimeTranscription": """Returns a client for real-time audio transcription via WebSocket.""" if not hasattr(self, "_realtime"): - from mistralai.extra.realtime import RealtimeTranscription + from mistralai.extra.realtime import RealtimeTranscription # pylint: disable=import-outside-toplevel - self._realtime = RealtimeTranscription(self.sdk_configuration) + self._realtime = RealtimeTranscription(self.sdk_configuration) # pylint: disable=attribute-defined-outside-init return self._realtime diff --git a/src/mistralai/models/audiotranscriptionrequest.py b/src/mistralai/models/audiotranscriptionrequest.py index 308e2599..86417b42 100644 --- a/src/mistralai/models/audiotranscriptionrequest.py +++ b/src/mistralai/models/audiotranscriptionrequest.py @@ -24,6 +24,8 @@ class AudioTranscriptionRequestTypedDict(TypedDict): r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" temperature: NotRequired[Nullable[float]] stream: Literal[False] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] timestamp_granularities: NotRequired[List[TimestampGranularity]] r"""Granularities of timestamps to include in the response.""" @@ -55,6 +57,10 @@ class AudioTranscriptionRequest(BaseModel): FieldMetadata(multipart=True), ] = False + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + timestamp_granularities: Annotated[ Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) ] = None @@ -69,6 +75,8 @@ def serialize_model(self, handler): "language", "temperature", "stream", + "diarize", + "context_bias", "timestamp_granularities", ] nullable_fields = ["file_url", "file_id", "language", "temperature"] diff --git a/src/mistralai/models/audiotranscriptionrequeststream.py b/src/mistralai/models/audiotranscriptionrequeststream.py index 04374503..1f4087e8 100644 --- a/src/mistralai/models/audiotranscriptionrequeststream.py +++ b/src/mistralai/models/audiotranscriptionrequeststream.py @@ -23,6 +23,8 @@ class AudioTranscriptionRequestStreamTypedDict(TypedDict): r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" temperature: NotRequired[Nullable[float]] stream: Literal[True] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] timestamp_granularities: NotRequired[List[TimestampGranularity]] r"""Granularities of timestamps to include in the response.""" @@ -53,6 +55,10 @@ class AudioTranscriptionRequestStream(BaseModel): FieldMetadata(multipart=True), ] = True + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + timestamp_granularities: Annotated[ Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) ] = None @@ -67,6 +73,8 @@ def serialize_model(self, handler): "language", "temperature", "stream", + "diarize", + "context_bias", "timestamp_granularities", ] nullable_fields = ["file_url", "file_id", "language", "temperature"] diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py index 02816df6..5bda890f 100644 --- a/src/mistralai/models/timestampgranularity.py +++ b/src/mistralai/models/timestampgranularity.py @@ -4,4 +4,7 @@ from typing import Literal -TimestampGranularity = Literal["segment",] +TimestampGranularity = Literal[ + "segment", + "word", +] diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py index aa30f053..40ad20b3 100644 --- a/src/mistralai/models/transcriptionsegmentchunk.py +++ b/src/mistralai/models/transcriptionsegmentchunk.py @@ -1,9 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic -from pydantic import ConfigDict +from pydantic import ConfigDict, model_serializer from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -15,6 +15,8 @@ class TranscriptionSegmentChunkTypedDict(TypedDict): text: str start: float end: float + score: NotRequired[Nullable[float]] + speaker_id: NotRequired[Nullable[str]] type: NotRequired[Type] @@ -30,6 +32,10 @@ class TranscriptionSegmentChunk(BaseModel): end: float + score: OptionalNullable[float] = UNSET + + speaker_id: OptionalNullable[str] = UNSET + type: Optional[Type] = "transcription_segment" @property @@ -39,3 +45,36 @@ def additional_properties(self): @additional_properties.setter def additional_properties(self, value): self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["score", "speaker_id", "type"] + nullable_fields = ["score", "speaker_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py 
b/src/mistralai/models/transcriptionstreamsegmentdelta.py index d779ed83..550c83e7 100644 --- a/src/mistralai/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/models/transcriptionstreamsegmentdelta.py @@ -1,9 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic -from pydantic import ConfigDict +from pydantic import ConfigDict, model_serializer from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -15,6 +15,7 @@ class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): text: str start: float end: float + speaker_id: NotRequired[Nullable[str]] type: NotRequired[TranscriptionStreamSegmentDeltaType] @@ -30,6 +31,8 @@ class TranscriptionStreamSegmentDelta(BaseModel): end: float + speaker_id: OptionalNullable[str] = UNSET + type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" @property @@ -39,3 +42,36 @@ def additional_properties(self): @additional_properties.setter def additional_properties(self, value): self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["speaker_id", "type"] + nullable_fields = ["speaker_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py index bdbeb1cc..90f2e58a 100644 --- a/src/mistralai/transcriptions.py +++ b/src/mistralai/transcriptions.py @@ -25,6 +25,8 @@ def complete( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[ List[models_timestampgranularity.TimestampGranularity] ] = None, @@ -41,6 +43,8 @@ def complete( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. 
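A sketch of the unset-versus-null behavior the serialize_model hooks above implement, assuming TranscriptionSegmentChunk remains importable from mistralai.models as elsewhere in this SDK; values are illustrative:

from mistralai.models import TranscriptionSegmentChunk

chunk = TranscriptionSegmentChunk(text="hello", start=0.0, end=1.2)
# speaker_id was never set, so the serializer omits the key entirely
assert "speaker_id" not in chunk.model_dump()

chunk = TranscriptionSegmentChunk(text="hello", start=0.0, end=1.2, speaker_id=None)
# speaker_id was explicitly set to None, so it serializes as an explicit null
assert chunk.model_dump()["speaker_id"] is None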
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -64,6 +68,8 @@ def complete( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -130,6 +136,8 @@ async def complete_async( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[ List[models_timestampgranularity.TimestampGranularity] ] = None, @@ -146,6 +154,8 @@ async def complete_async( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -169,6 +179,8 @@ async def complete_async( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -235,6 +247,8 @@ def stream( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[ List[models_timestampgranularity.TimestampGranularity] ] = None, @@ -251,6 +265,8 @@ def stream( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -274,6 +290,8 @@ def stream( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -350,6 +368,8 @@ async def stream_async( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[ List[models_timestampgranularity.TimestampGranularity] ] = None, @@ -366,6 +386,8 @@ async def stream_async( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -389,6 +411,8 @@ file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) diff --git a/uv.lock b/uv.lock index 85e04bd9..7158ecbd 100644 --- a/uv.lock +++ b/uv.lock @@ -605,6 +605,7 @@ dev = [ { name = "types-authlib" }, { name = "types-python-dateutil" }, { name = "types-pyyaml" }, + { name = "websockets" }, ] lint = [ { name = "mypy" }, @@ -646,6 +647,7 @@ dev = [ { name = "types-authlib", specifier = ">=1.5.0.20250516,<2" }, { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, { name = "types-pyyaml", specifier = ">=6.0.12.20250516,<7" }, + { name = "websockets", specifier = ">=13.0" }, ] lint = [ { name = "mypy", specifier = "==1.15.0" }, From 494ac9b3efd8664ce5de381307d8389b24df828b Mon Sep 17 00:00:00 2001 From: "alexandre.abouchahine" Date: Tue, 3 Feb 2026 16:36:13 +0100 Subject: [PATCH 04/42] bump pyproject version --- .../mistral/agents/async_conversation_run.py | 2 +- pyproject.toml | 2 +- scripts/run_examples.sh | 16 +--------------- uv.lock | 2 +- 4 files changed, 4 insertions(+), 18 deletions(-) diff --git a/examples/mistral/agents/async_conversation_run.py b/examples/mistral/agents/async_conversation_run.py index 9e118037..27f9c870 100644 --- a/examples/mistral/agents/async_conversation_run.py +++ b/examples/mistral/agents/async_conversation_run.py @@ -6,7 +6,7 @@ from mistralai.extra.run.context import RunContext from mistralai.types import BaseModel -MODEL = "mistral-medium-latest" +MODEL = "mistral-medium-2505" def math_question_generator(question_num: int): diff --git a/pyproject.toml b/pyproject.toml index ef338022..2cb90876 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.11.1" +version = "1.12.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 40c6d17e..5bc6fc48 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -6,12 +6,8 @@ RETRY_COUNT=3 # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in - --no-extra-dep) - NO_EXTRA_DEP=true - shift - ;; --retry-count) - RETRY_COUNT="$1" + RETRY_COUNT="$2" shift 2 ;; --help) @@ -44,16 +40,6 @@ exclude_files=( "examples/mistral/audio/async_realtime_transcription_stream.py" ) -# Check if the no-extra-dep flag is set -if [ "$NO_EXTRA_DEP" = true ]; then - # Add more files to the exclude list - exclude_files+=( - "examples/mistral/agents/async_conversation_run_stream.py" - "examples/mistral/agents/async_conversation_run.py" - "examples/mistral/agents/async_multi_turn_conversation.py" - ) -fi - failed=0 echo "Skipping scripts" diff --git a/uv.lock b/uv.lock index 7158ecbd..fe22e76a 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ [[package]] name = "mistralai" -version = "1.11.1" +version = "1.12.0" source = { editable = "."
} dependencies = [ { name = "eval-type-backport" }, From 61f82f0a705c7d83d2fefd66d591e4b9cf45f9bc Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:11:08 +0100 Subject: [PATCH 05/42] chore: configure Speakeasy for mistralai.client module - Update version to 2.0.0a1 - Set moduleName to mistralai.client for PEP 420 namespace --- .speakeasy/gen.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 0cc6f059..b47a192d 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -26,7 +26,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.12.0 + version: 2.0.0a1 additionalDependencies: dev: pytest: ^8.2.2 @@ -63,7 +63,7 @@ python: license: "" maxMethodParams: 15 methodArguments: infer-optional-args - moduleName: "" + moduleName: mistralai.client multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv From 79fa300722b6eb889142357a1f14f789c91ba5f5 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:22:38 +0100 Subject: [PATCH 06/42] chore: remove old generated SDK files Prepare for PEP 420 namespace migration by removing Speakeasy-generated files from src/mistralai/. Custom code in extra/ and _hooks/ is preserved. Speakeasy will regenerate the SDK under src/mistralai/client/. --- src/mistralai/__init__.py | 18 - src/mistralai/_hooks/__init__.py | 5 - src/mistralai/_hooks/sdkhooks.py | 76 - src/mistralai/_hooks/types.py | 113 - src/mistralai/_version.py | 15 - src/mistralai/accesses.py | 619 ---- src/mistralai/agents.py | 725 ----- src/mistralai/async_client.py | 15 - src/mistralai/audio.py | 43 - src/mistralai/basesdk.py | 370 --- src/mistralai/batch.py | 20 - src/mistralai/beta.py | 31 - src/mistralai/chat.py | 835 ----- src/mistralai/classifiers.py | 800 ----- src/mistralai/client.py | 14 - src/mistralai/conversations.py | 2865 ----------------- src/mistralai/documents.py | 1981 ------------ src/mistralai/embeddings.py | 240 -- src/mistralai/files.py | 1120 ------- src/mistralai/fim.py | 545 ---- src/mistralai/fine_tuning.py | 20 - src/mistralai/httpclient.py | 125 - src/mistralai/jobs.py | 1067 ------ src/mistralai/libraries.py | 946 ------ src/mistralai/mistral_agents.py | 2080 ------------ src/mistralai/mistral_jobs.py | 799 ----- src/mistralai/models/__init__.py | 2531 --------------- src/mistralai/models/agent.py | 142 - src/mistralai/models/agentaliasresponse.py | 23 - src/mistralai/models/agentconversation.py | 89 - src/mistralai/models/agentcreationrequest.py | 113 - src/mistralai/models/agenthandoffdoneevent.py | 33 - src/mistralai/models/agenthandoffentry.py | 76 - .../models/agenthandoffstartedevent.py | 33 - ..._api_v1_agents_create_or_update_aliasop.py | 26 - .../models/agents_api_v1_agents_deleteop.py | 16 - .../agents_api_v1_agents_get_versionop.py | 21 - .../models/agents_api_v1_agents_getop.py | 62 - ...ts_api_v1_agents_list_version_aliasesop.py | 16 - .../agents_api_v1_agents_list_versionsop.py | 33 - .../models/agents_api_v1_agents_listop.py | 98 - .../agents_api_v1_agents_update_versionop.py | 21 - .../models/agents_api_v1_agents_updateop.py | 23 - ...ts_api_v1_conversations_append_streamop.py | 28 - .../agents_api_v1_conversations_appendop.py | 28 - .../agents_api_v1_conversations_deleteop.py | 18 - .../agents_api_v1_conversations_getop.py | 35 - .../agents_api_v1_conversations_historyop.py | 18 - .../agents_api_v1_conversations_listop.py | 74 - .../agents_api_v1_conversations_messagesop.py | 18 - 
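A sketch of the layout these two chores prepare, assuming Speakeasy regenerates the SDK where the commit message says it will; the import path below is hypothetical until the regenerated code lands:

# Hypothetical post-migration layout (PEP 420 implicit namespace package):
#   src/mistralai/          <- namespace root, no __init__.py
#   src/mistralai/client/   <- regenerated SDK (gen.yaml moduleName: mistralai.client)
#   src/mistralai/extra/    <- preserved custom code
from mistralai.client import Mistral  # hypothetical import after regeneration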
...s_api_v1_conversations_restart_streamop.py | 28 - .../agents_api_v1_conversations_restartop.py | 28 - .../models/agentscompletionrequest.py | 192 -- .../models/agentscompletionstreamrequest.py | 190 -- src/mistralai/models/agentupdaterequest.py | 127 - src/mistralai/models/apiendpoint.py | 22 - src/mistralai/models/archiveftmodelout.py | 23 - src/mistralai/models/assistantmessage.py | 71 - src/mistralai/models/audiochunk.py | 20 - src/mistralai/models/audioencoding.py | 18 - src/mistralai/models/audioformat.py | 17 - .../models/audiotranscriptionrequest.py | 107 - .../models/audiotranscriptionrequeststream.py | 105 - src/mistralai/models/basemodelcard.py | 110 - src/mistralai/models/batcherror.py | 17 - src/mistralai/models/batchjobin.py | 82 - src/mistralai/models/batchjobout.py | 123 - src/mistralai/models/batchjobsout.py | 24 - src/mistralai/models/batchjobstatus.py | 15 - src/mistralai/models/batchrequest.py | 48 - src/mistralai/models/builtinconnectors.py | 13 - .../models/chatclassificationrequest.py | 20 - src/mistralai/models/chatcompletionchoice.py | 33 - src/mistralai/models/chatcompletionrequest.py | 215 -- .../models/chatcompletionresponse.py | 31 - .../models/chatcompletionstreamrequest.py | 217 -- src/mistralai/models/chatmoderationrequest.py | 83 - src/mistralai/models/checkpointout.py | 26 - src/mistralai/models/classificationrequest.py | 68 - .../models/classificationresponse.py | 24 - .../models/classificationtargetresult.py | 14 - .../models/classifierdetailedjobout.py | 158 - src/mistralai/models/classifierftmodelout.py | 108 - src/mistralai/models/classifierjobout.py | 167 - src/mistralai/models/classifiertargetin.py | 55 - src/mistralai/models/classifiertargetout.py | 24 - .../models/classifiertrainingparameters.py | 73 - .../models/classifiertrainingparametersin.py | 85 - src/mistralai/models/codeinterpretertool.py | 17 - src/mistralai/models/completionargs.py | 101 - src/mistralai/models/completionargsstop.py | 13 - src/mistralai/models/completionchunk.py | 34 - .../models/completiondetailedjobout.py | 165 - src/mistralai/models/completionevent.py | 14 - src/mistralai/models/completionftmodelout.py | 104 - src/mistralai/models/completionjobout.py | 178 - .../models/completionresponsestreamchoice.py | 63 - .../models/completiontrainingparameters.py | 78 - .../models/completiontrainingparametersin.py | 90 - src/mistralai/models/contentchunk.py | 42 - .../models/conversationappendrequest.py | 38 - .../models/conversationappendstreamrequest.py | 40 - src/mistralai/models/conversationevents.py | 78 - src/mistralai/models/conversationhistory.py | 59 - src/mistralai/models/conversationinputs.py | 14 - src/mistralai/models/conversationmessages.py | 28 - src/mistralai/models/conversationrequest.py | 154 - src/mistralai/models/conversationresponse.py | 52 - .../models/conversationrestartrequest.py | 107 - .../conversationrestartstreamrequest.py | 111 - .../models/conversationstreamrequest.py | 160 - src/mistralai/models/conversationusageinfo.py | 63 - ...elete_model_v1_models_model_id_deleteop.py | 18 - src/mistralai/models/deletefileout.py | 25 - src/mistralai/models/deletemodelout.py | 26 - src/mistralai/models/deltamessage.py | 61 - src/mistralai/models/documentlibrarytool.py | 22 - src/mistralai/models/documentout.py | 121 - src/mistralai/models/documenttextcontent.py | 13 - src/mistralai/models/documentupdatein.py | 65 - src/mistralai/models/documenturlchunk.py | 56 - src/mistralai/models/embeddingdtype.py | 13 - src/mistralai/models/embeddingrequest.py | 84 - 
src/mistralai/models/embeddingresponse.py | 28 - src/mistralai/models/embeddingresponsedata.py | 20 - src/mistralai/models/encodingformat.py | 10 - src/mistralai/models/entitytype.py | 16 - src/mistralai/models/eventout.py | 55 - src/mistralai/models/file.py | 33 - src/mistralai/models/filechunk.py | 23 - src/mistralai/models/filepurpose.py | 15 - .../models/files_api_routes_delete_fileop.py | 16 - .../files_api_routes_download_fileop.py | 16 - .../files_api_routes_get_signed_urlop.py | 25 - .../models/files_api_routes_list_filesop.py | 103 - .../files_api_routes_retrieve_fileop.py | 16 - .../models/files_api_routes_upload_fileop.py | 40 - src/mistralai/models/fileschema.py | 88 - src/mistralai/models/filesignedurl.py | 13 - src/mistralai/models/fimcompletionrequest.py | 124 - src/mistralai/models/fimcompletionresponse.py | 31 - .../models/fimcompletionstreamrequest.py | 122 - src/mistralai/models/finetuneablemodeltype.py | 10 - .../models/ftclassifierlossfunction.py | 10 - .../models/ftmodelcapabilitiesout.py | 26 - src/mistralai/models/ftmodelcard.py | 126 - src/mistralai/models/function.py | 23 - src/mistralai/models/functioncall.py | 23 - src/mistralai/models/functioncallentry.py | 77 - .../models/functioncallentryarguments.py | 15 - src/mistralai/models/functioncallevent.py | 36 - src/mistralai/models/functionname.py | 17 - src/mistralai/models/functionresultentry.py | 70 - src/mistralai/models/functiontool.py | 21 - src/mistralai/models/githubrepositoryin.py | 63 - src/mistralai/models/githubrepositoryout.py | 63 - src/mistralai/models/httpvalidationerror.py | 28 - src/mistralai/models/imagegenerationtool.py | 17 - src/mistralai/models/imageurl.py | 47 - src/mistralai/models/imageurlchunk.py | 33 - src/mistralai/models/inputentries.py | 37 - src/mistralai/models/inputs.py | 54 - src/mistralai/models/instructrequest.py | 42 - src/mistralai/models/jobin.py | 141 - src/mistralai/models/jobmetadataout.py | 78 - ...obs_api_routes_batch_cancel_batch_jobop.py | 16 - .../jobs_api_routes_batch_get_batch_jobop.py | 53 - .../jobs_api_routes_batch_get_batch_jobsop.py | 102 - ..._fine_tuning_archive_fine_tuned_modelop.py | 18 - ...es_fine_tuning_cancel_fine_tuning_jobop.py | 45 - ...es_fine_tuning_create_fine_tuning_jobop.py | 38 - ...outes_fine_tuning_get_fine_tuning_jobop.py | 45 - ...utes_fine_tuning_get_fine_tuning_jobsop.py | 156 - ...tes_fine_tuning_start_fine_tuning_jobop.py | 43 - ...ine_tuning_unarchive_fine_tuned_modelop.py | 18 - ...s_fine_tuning_update_fine_tuned_modelop.py | 51 - src/mistralai/models/jobsout.py | 41 - src/mistralai/models/jsonschema.py | 55 - src/mistralai/models/legacyjobmetadataout.py | 119 - src/mistralai/models/libraries_delete_v1op.py | 16 - .../models/libraries_documents_delete_v1op.py | 21 - ...ents_get_extracted_text_signed_url_v1op.py | 21 - ...libraries_documents_get_signed_url_v1op.py | 21 - .../libraries_documents_get_status_v1op.py | 21 - ...braries_documents_get_text_content_v1op.py | 21 - .../models/libraries_documents_get_v1op.py | 21 - .../models/libraries_documents_list_v1op.py | 91 - .../libraries_documents_reprocess_v1op.py | 21 - .../models/libraries_documents_update_v1op.py | 28 - .../models/libraries_documents_upload_v1op.py | 56 - src/mistralai/models/libraries_get_v1op.py | 16 - .../models/libraries_share_create_v1op.py | 22 - .../models/libraries_share_delete_v1op.py | 23 - .../models/libraries_share_list_v1op.py | 16 - src/mistralai/models/libraries_update_v1op.py | 23 - src/mistralai/models/libraryin.py | 50 - 
src/mistralai/models/libraryinupdate.py | 47 - src/mistralai/models/libraryout.py | 110 - src/mistralai/models/listdocumentout.py | 19 - src/mistralai/models/listfilesout.py | 52 - src/mistralai/models/listlibraryout.py | 15 - src/mistralai/models/listsharingout.py | 15 - src/mistralai/models/messageentries.py | 18 - .../models/messageinputcontentchunks.py | 28 - src/mistralai/models/messageinputentry.py | 105 - .../models/messageoutputcontentchunks.py | 37 - src/mistralai/models/messageoutputentry.py | 103 - src/mistralai/models/messageoutputevent.py | 95 - src/mistralai/models/metricout.py | 54 - src/mistralai/models/mistralerror.py | 30 - src/mistralai/models/mistralpromptmode.py | 12 - src/mistralai/models/modelcapabilities.py | 41 - src/mistralai/models/modelconversation.py | 133 - src/mistralai/models/modellist.py | 34 - src/mistralai/models/moderationobject.py | 21 - src/mistralai/models/moderationresponse.py | 21 - src/mistralai/models/no_response_error.py | 17 - src/mistralai/models/ocrimageobject.py | 83 - src/mistralai/models/ocrpagedimensions.py | 25 - src/mistralai/models/ocrpageobject.py | 85 - src/mistralai/models/ocrrequest.py | 140 - src/mistralai/models/ocrresponse.py | 62 - src/mistralai/models/ocrtableobject.py | 34 - src/mistralai/models/ocrusageinfo.py | 51 - src/mistralai/models/outputcontentchunks.py | 37 - src/mistralai/models/paginationinfo.py | 25 - src/mistralai/models/prediction.py | 29 - src/mistralai/models/processingstatusout.py | 16 - .../models/realtimetranscriptionerror.py | 27 - .../realtimetranscriptionerrordetail.py | 29 - .../models/realtimetranscriptionsession.py | 20 - .../realtimetranscriptionsessioncreated.py | 30 - .../realtimetranscriptionsessionupdated.py | 30 - src/mistralai/models/referencechunk.py | 20 - src/mistralai/models/requestsource.py | 11 - src/mistralai/models/responsedoneevent.py | 25 - src/mistralai/models/responseerrorevent.py | 27 - src/mistralai/models/responseformat.py | 54 - src/mistralai/models/responseformats.py | 11 - src/mistralai/models/responsestartedevent.py | 24 - .../models/responsevalidationerror.py | 27 - ...retrieve_model_v1_models_model_id_getop.py | 38 - src/mistralai/models/retrievefileout.py | 91 - src/mistralai/models/sampletype.py | 17 - src/mistralai/models/sdkerror.py | 40 - src/mistralai/models/security.py | 25 - src/mistralai/models/shareenum.py | 14 - src/mistralai/models/sharingdelete.py | 55 - src/mistralai/models/sharingin.py | 59 - src/mistralai/models/sharingout.py | 59 - src/mistralai/models/source.py | 15 - src/mistralai/models/ssetypes.py | 19 - src/mistralai/models/systemmessage.py | 35 - .../models/systemmessagecontentchunks.py | 21 - src/mistralai/models/textchunk.py | 20 - src/mistralai/models/thinkchunk.py | 35 - src/mistralai/models/timestampgranularity.py | 10 - src/mistralai/models/tool.py | 19 - src/mistralai/models/toolcall.py | 25 - src/mistralai/models/toolchoice.py | 25 - src/mistralai/models/toolchoiceenum.py | 12 - .../models/toolexecutiondeltaevent.py | 44 - .../models/toolexecutiondoneevent.py | 44 - src/mistralai/models/toolexecutionentry.py | 80 - .../models/toolexecutionstartedevent.py | 44 - src/mistralai/models/toolfilechunk.py | 69 - src/mistralai/models/toolmessage.py | 66 - src/mistralai/models/toolreferencechunk.py | 74 - src/mistralai/models/tooltypes.py | 8 - src/mistralai/models/trainingfile.py | 17 - src/mistralai/models/transcriptionresponse.py | 79 - .../models/transcriptionsegmentchunk.py | 80 - .../models/transcriptionstreamdone.py | 85 - 
.../models/transcriptionstreamevents.py | 58 - .../models/transcriptionstreameventtypes.py | 12 - .../models/transcriptionstreamlanguage.py | 35 - .../models/transcriptionstreamsegmentdelta.py | 77 - .../models/transcriptionstreamtextdelta.py | 35 - src/mistralai/models/unarchiveftmodelout.py | 23 - src/mistralai/models/updateftmodelin.py | 47 - src/mistralai/models/uploadfileout.py | 88 - src/mistralai/models/usageinfo.py | 76 - src/mistralai/models/usermessage.py | 60 - src/mistralai/models/validationerror.py | 26 - src/mistralai/models/wandbintegration.py | 66 - src/mistralai/models/wandbintegrationout.py | 64 - src/mistralai/models/websearchpremiumtool.py | 17 - src/mistralai/models/websearchtool.py | 17 - src/mistralai/models_.py | 1063 ------ src/mistralai/ocr.py | 303 -- src/mistralai/py.typed | 1 - src/mistralai/sdk.py | 222 -- src/mistralai/sdkconfiguration.py | 53 - src/mistralai/transcriptions.py | 481 --- src/mistralai/types/__init__.py | 21 - src/mistralai/types/basemodel.py | 77 - src/mistralai/utils/__init__.py | 197 -- src/mistralai/utils/annotations.py | 79 - src/mistralai/utils/datetimes.py | 23 - src/mistralai/utils/enums.py | 134 - src/mistralai/utils/eventstreaming.py | 248 -- src/mistralai/utils/forms.py | 234 -- src/mistralai/utils/headers.py | 136 - src/mistralai/utils/logger.py | 27 - src/mistralai/utils/metadata.py | 118 - src/mistralai/utils/queryparams.py | 217 -- src/mistralai/utils/requestbodies.py | 66 - src/mistralai/utils/retries.py | 281 -- src/mistralai/utils/security.py | 192 -- src/mistralai/utils/serializers.py | 229 -- .../utils/unmarshal_json_response.py | 38 - src/mistralai/utils/url.py | 155 - src/mistralai/utils/values.py | 137 - 313 files changed, 35975 deletions(-) delete mode 100644 src/mistralai/__init__.py delete mode 100644 src/mistralai/_hooks/__init__.py delete mode 100644 src/mistralai/_hooks/sdkhooks.py delete mode 100644 src/mistralai/_hooks/types.py delete mode 100644 src/mistralai/_version.py delete mode 100644 src/mistralai/accesses.py delete mode 100644 src/mistralai/agents.py delete mode 100644 src/mistralai/async_client.py delete mode 100644 src/mistralai/audio.py delete mode 100644 src/mistralai/basesdk.py delete mode 100644 src/mistralai/batch.py delete mode 100644 src/mistralai/beta.py delete mode 100644 src/mistralai/chat.py delete mode 100644 src/mistralai/classifiers.py delete mode 100644 src/mistralai/client.py delete mode 100644 src/mistralai/conversations.py delete mode 100644 src/mistralai/documents.py delete mode 100644 src/mistralai/embeddings.py delete mode 100644 src/mistralai/files.py delete mode 100644 src/mistralai/fim.py delete mode 100644 src/mistralai/fine_tuning.py delete mode 100644 src/mistralai/httpclient.py delete mode 100644 src/mistralai/jobs.py delete mode 100644 src/mistralai/libraries.py delete mode 100644 src/mistralai/mistral_agents.py delete mode 100644 src/mistralai/mistral_jobs.py delete mode 100644 src/mistralai/models/__init__.py delete mode 100644 src/mistralai/models/agent.py delete mode 100644 src/mistralai/models/agentaliasresponse.py delete mode 100644 src/mistralai/models/agentconversation.py delete mode 100644 src/mistralai/models/agentcreationrequest.py delete mode 100644 src/mistralai/models/agenthandoffdoneevent.py delete mode 100644 src/mistralai/models/agenthandoffentry.py delete mode 100644 src/mistralai/models/agenthandoffstartedevent.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py delete mode 100644 
src/mistralai/models/agents_api_v1_agents_deleteop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_get_versionop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_getop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_list_versionsop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_listop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_update_versionop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_updateop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_append_streamop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_appendop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_deleteop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_getop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_historyop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_listop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_messagesop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_restart_streamop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_restartop.py delete mode 100644 src/mistralai/models/agentscompletionrequest.py delete mode 100644 src/mistralai/models/agentscompletionstreamrequest.py delete mode 100644 src/mistralai/models/agentupdaterequest.py delete mode 100644 src/mistralai/models/apiendpoint.py delete mode 100644 src/mistralai/models/archiveftmodelout.py delete mode 100644 src/mistralai/models/assistantmessage.py delete mode 100644 src/mistralai/models/audiochunk.py delete mode 100644 src/mistralai/models/audioencoding.py delete mode 100644 src/mistralai/models/audioformat.py delete mode 100644 src/mistralai/models/audiotranscriptionrequest.py delete mode 100644 src/mistralai/models/audiotranscriptionrequeststream.py delete mode 100644 src/mistralai/models/basemodelcard.py delete mode 100644 src/mistralai/models/batcherror.py delete mode 100644 src/mistralai/models/batchjobin.py delete mode 100644 src/mistralai/models/batchjobout.py delete mode 100644 src/mistralai/models/batchjobsout.py delete mode 100644 src/mistralai/models/batchjobstatus.py delete mode 100644 src/mistralai/models/batchrequest.py delete mode 100644 src/mistralai/models/builtinconnectors.py delete mode 100644 src/mistralai/models/chatclassificationrequest.py delete mode 100644 src/mistralai/models/chatcompletionchoice.py delete mode 100644 src/mistralai/models/chatcompletionrequest.py delete mode 100644 src/mistralai/models/chatcompletionresponse.py delete mode 100644 src/mistralai/models/chatcompletionstreamrequest.py delete mode 100644 src/mistralai/models/chatmoderationrequest.py delete mode 100644 src/mistralai/models/checkpointout.py delete mode 100644 src/mistralai/models/classificationrequest.py delete mode 100644 src/mistralai/models/classificationresponse.py delete mode 100644 src/mistralai/models/classificationtargetresult.py delete mode 100644 src/mistralai/models/classifierdetailedjobout.py delete mode 100644 src/mistralai/models/classifierftmodelout.py delete mode 100644 src/mistralai/models/classifierjobout.py delete mode 100644 src/mistralai/models/classifiertargetin.py delete mode 100644 src/mistralai/models/classifiertargetout.py delete mode 100644 src/mistralai/models/classifiertrainingparameters.py delete mode 100644 
src/mistralai/models/classifiertrainingparametersin.py delete mode 100644 src/mistralai/models/codeinterpretertool.py delete mode 100644 src/mistralai/models/completionargs.py delete mode 100644 src/mistralai/models/completionargsstop.py delete mode 100644 src/mistralai/models/completionchunk.py delete mode 100644 src/mistralai/models/completiondetailedjobout.py delete mode 100644 src/mistralai/models/completionevent.py delete mode 100644 src/mistralai/models/completionftmodelout.py delete mode 100644 src/mistralai/models/completionjobout.py delete mode 100644 src/mistralai/models/completionresponsestreamchoice.py delete mode 100644 src/mistralai/models/completiontrainingparameters.py delete mode 100644 src/mistralai/models/completiontrainingparametersin.py delete mode 100644 src/mistralai/models/contentchunk.py delete mode 100644 src/mistralai/models/conversationappendrequest.py delete mode 100644 src/mistralai/models/conversationappendstreamrequest.py delete mode 100644 src/mistralai/models/conversationevents.py delete mode 100644 src/mistralai/models/conversationhistory.py delete mode 100644 src/mistralai/models/conversationinputs.py delete mode 100644 src/mistralai/models/conversationmessages.py delete mode 100644 src/mistralai/models/conversationrequest.py delete mode 100644 src/mistralai/models/conversationresponse.py delete mode 100644 src/mistralai/models/conversationrestartrequest.py delete mode 100644 src/mistralai/models/conversationrestartstreamrequest.py delete mode 100644 src/mistralai/models/conversationstreamrequest.py delete mode 100644 src/mistralai/models/conversationusageinfo.py delete mode 100644 src/mistralai/models/delete_model_v1_models_model_id_deleteop.py delete mode 100644 src/mistralai/models/deletefileout.py delete mode 100644 src/mistralai/models/deletemodelout.py delete mode 100644 src/mistralai/models/deltamessage.py delete mode 100644 src/mistralai/models/documentlibrarytool.py delete mode 100644 src/mistralai/models/documentout.py delete mode 100644 src/mistralai/models/documenttextcontent.py delete mode 100644 src/mistralai/models/documentupdatein.py delete mode 100644 src/mistralai/models/documenturlchunk.py delete mode 100644 src/mistralai/models/embeddingdtype.py delete mode 100644 src/mistralai/models/embeddingrequest.py delete mode 100644 src/mistralai/models/embeddingresponse.py delete mode 100644 src/mistralai/models/embeddingresponsedata.py delete mode 100644 src/mistralai/models/encodingformat.py delete mode 100644 src/mistralai/models/entitytype.py delete mode 100644 src/mistralai/models/eventout.py delete mode 100644 src/mistralai/models/file.py delete mode 100644 src/mistralai/models/filechunk.py delete mode 100644 src/mistralai/models/filepurpose.py delete mode 100644 src/mistralai/models/files_api_routes_delete_fileop.py delete mode 100644 src/mistralai/models/files_api_routes_download_fileop.py delete mode 100644 src/mistralai/models/files_api_routes_get_signed_urlop.py delete mode 100644 src/mistralai/models/files_api_routes_list_filesop.py delete mode 100644 src/mistralai/models/files_api_routes_retrieve_fileop.py delete mode 100644 src/mistralai/models/files_api_routes_upload_fileop.py delete mode 100644 src/mistralai/models/fileschema.py delete mode 100644 src/mistralai/models/filesignedurl.py delete mode 100644 src/mistralai/models/fimcompletionrequest.py delete mode 100644 src/mistralai/models/fimcompletionresponse.py delete mode 100644 src/mistralai/models/fimcompletionstreamrequest.py delete mode 100644 
src/mistralai/models/finetuneablemodeltype.py delete mode 100644 src/mistralai/models/ftclassifierlossfunction.py delete mode 100644 src/mistralai/models/ftmodelcapabilitiesout.py delete mode 100644 src/mistralai/models/ftmodelcard.py delete mode 100644 src/mistralai/models/function.py delete mode 100644 src/mistralai/models/functioncall.py delete mode 100644 src/mistralai/models/functioncallentry.py delete mode 100644 src/mistralai/models/functioncallentryarguments.py delete mode 100644 src/mistralai/models/functioncallevent.py delete mode 100644 src/mistralai/models/functionname.py delete mode 100644 src/mistralai/models/functionresultentry.py delete mode 100644 src/mistralai/models/functiontool.py delete mode 100644 src/mistralai/models/githubrepositoryin.py delete mode 100644 src/mistralai/models/githubrepositoryout.py delete mode 100644 src/mistralai/models/httpvalidationerror.py delete mode 100644 src/mistralai/models/imagegenerationtool.py delete mode 100644 src/mistralai/models/imageurl.py delete mode 100644 src/mistralai/models/imageurlchunk.py delete mode 100644 src/mistralai/models/inputentries.py delete mode 100644 src/mistralai/models/inputs.py delete mode 100644 src/mistralai/models/instructrequest.py delete mode 100644 src/mistralai/models/jobin.py delete mode 100644 src/mistralai/models/jobmetadataout.py delete mode 100644 src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py delete mode 100644 src/mistralai/models/jobsout.py delete mode 100644 src/mistralai/models/jsonschema.py delete mode 100644 src/mistralai/models/legacyjobmetadataout.py delete mode 100644 src/mistralai/models/libraries_delete_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_delete_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_signed_url_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_status_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_text_content_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_list_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_reprocess_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_update_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_upload_v1op.py delete mode 100644 src/mistralai/models/libraries_get_v1op.py delete mode 100644 src/mistralai/models/libraries_share_create_v1op.py delete mode 100644 
src/mistralai/models/libraries_share_delete_v1op.py delete mode 100644 src/mistralai/models/libraries_share_list_v1op.py delete mode 100644 src/mistralai/models/libraries_update_v1op.py delete mode 100644 src/mistralai/models/libraryin.py delete mode 100644 src/mistralai/models/libraryinupdate.py delete mode 100644 src/mistralai/models/libraryout.py delete mode 100644 src/mistralai/models/listdocumentout.py delete mode 100644 src/mistralai/models/listfilesout.py delete mode 100644 src/mistralai/models/listlibraryout.py delete mode 100644 src/mistralai/models/listsharingout.py delete mode 100644 src/mistralai/models/messageentries.py delete mode 100644 src/mistralai/models/messageinputcontentchunks.py delete mode 100644 src/mistralai/models/messageinputentry.py delete mode 100644 src/mistralai/models/messageoutputcontentchunks.py delete mode 100644 src/mistralai/models/messageoutputentry.py delete mode 100644 src/mistralai/models/messageoutputevent.py delete mode 100644 src/mistralai/models/metricout.py delete mode 100644 src/mistralai/models/mistralerror.py delete mode 100644 src/mistralai/models/mistralpromptmode.py delete mode 100644 src/mistralai/models/modelcapabilities.py delete mode 100644 src/mistralai/models/modelconversation.py delete mode 100644 src/mistralai/models/modellist.py delete mode 100644 src/mistralai/models/moderationobject.py delete mode 100644 src/mistralai/models/moderationresponse.py delete mode 100644 src/mistralai/models/no_response_error.py delete mode 100644 src/mistralai/models/ocrimageobject.py delete mode 100644 src/mistralai/models/ocrpagedimensions.py delete mode 100644 src/mistralai/models/ocrpageobject.py delete mode 100644 src/mistralai/models/ocrrequest.py delete mode 100644 src/mistralai/models/ocrresponse.py delete mode 100644 src/mistralai/models/ocrtableobject.py delete mode 100644 src/mistralai/models/ocrusageinfo.py delete mode 100644 src/mistralai/models/outputcontentchunks.py delete mode 100644 src/mistralai/models/paginationinfo.py delete mode 100644 src/mistralai/models/prediction.py delete mode 100644 src/mistralai/models/processingstatusout.py delete mode 100644 src/mistralai/models/realtimetranscriptionerror.py delete mode 100644 src/mistralai/models/realtimetranscriptionerrordetail.py delete mode 100644 src/mistralai/models/realtimetranscriptionsession.py delete mode 100644 src/mistralai/models/realtimetranscriptionsessioncreated.py delete mode 100644 src/mistralai/models/realtimetranscriptionsessionupdated.py delete mode 100644 src/mistralai/models/referencechunk.py delete mode 100644 src/mistralai/models/requestsource.py delete mode 100644 src/mistralai/models/responsedoneevent.py delete mode 100644 src/mistralai/models/responseerrorevent.py delete mode 100644 src/mistralai/models/responseformat.py delete mode 100644 src/mistralai/models/responseformats.py delete mode 100644 src/mistralai/models/responsestartedevent.py delete mode 100644 src/mistralai/models/responsevalidationerror.py delete mode 100644 src/mistralai/models/retrieve_model_v1_models_model_id_getop.py delete mode 100644 src/mistralai/models/retrievefileout.py delete mode 100644 src/mistralai/models/sampletype.py delete mode 100644 src/mistralai/models/sdkerror.py delete mode 100644 src/mistralai/models/security.py delete mode 100644 src/mistralai/models/shareenum.py delete mode 100644 src/mistralai/models/sharingdelete.py delete mode 100644 src/mistralai/models/sharingin.py delete mode 100644 src/mistralai/models/sharingout.py delete mode 100644 
src/mistralai/models/source.py delete mode 100644 src/mistralai/models/ssetypes.py delete mode 100644 src/mistralai/models/systemmessage.py delete mode 100644 src/mistralai/models/systemmessagecontentchunks.py delete mode 100644 src/mistralai/models/textchunk.py delete mode 100644 src/mistralai/models/thinkchunk.py delete mode 100644 src/mistralai/models/timestampgranularity.py delete mode 100644 src/mistralai/models/tool.py delete mode 100644 src/mistralai/models/toolcall.py delete mode 100644 src/mistralai/models/toolchoice.py delete mode 100644 src/mistralai/models/toolchoiceenum.py delete mode 100644 src/mistralai/models/toolexecutiondeltaevent.py delete mode 100644 src/mistralai/models/toolexecutiondoneevent.py delete mode 100644 src/mistralai/models/toolexecutionentry.py delete mode 100644 src/mistralai/models/toolexecutionstartedevent.py delete mode 100644 src/mistralai/models/toolfilechunk.py delete mode 100644 src/mistralai/models/toolmessage.py delete mode 100644 src/mistralai/models/toolreferencechunk.py delete mode 100644 src/mistralai/models/tooltypes.py delete mode 100644 src/mistralai/models/trainingfile.py delete mode 100644 src/mistralai/models/transcriptionresponse.py delete mode 100644 src/mistralai/models/transcriptionsegmentchunk.py delete mode 100644 src/mistralai/models/transcriptionstreamdone.py delete mode 100644 src/mistralai/models/transcriptionstreamevents.py delete mode 100644 src/mistralai/models/transcriptionstreameventtypes.py delete mode 100644 src/mistralai/models/transcriptionstreamlanguage.py delete mode 100644 src/mistralai/models/transcriptionstreamsegmentdelta.py delete mode 100644 src/mistralai/models/transcriptionstreamtextdelta.py delete mode 100644 src/mistralai/models/unarchiveftmodelout.py delete mode 100644 src/mistralai/models/updateftmodelin.py delete mode 100644 src/mistralai/models/uploadfileout.py delete mode 100644 src/mistralai/models/usageinfo.py delete mode 100644 src/mistralai/models/usermessage.py delete mode 100644 src/mistralai/models/validationerror.py delete mode 100644 src/mistralai/models/wandbintegration.py delete mode 100644 src/mistralai/models/wandbintegrationout.py delete mode 100644 src/mistralai/models/websearchpremiumtool.py delete mode 100644 src/mistralai/models/websearchtool.py delete mode 100644 src/mistralai/models_.py delete mode 100644 src/mistralai/ocr.py delete mode 100644 src/mistralai/py.typed delete mode 100644 src/mistralai/sdk.py delete mode 100644 src/mistralai/sdkconfiguration.py delete mode 100644 src/mistralai/transcriptions.py delete mode 100644 src/mistralai/types/__init__.py delete mode 100644 src/mistralai/types/basemodel.py delete mode 100644 src/mistralai/utils/__init__.py delete mode 100644 src/mistralai/utils/annotations.py delete mode 100644 src/mistralai/utils/datetimes.py delete mode 100644 src/mistralai/utils/enums.py delete mode 100644 src/mistralai/utils/eventstreaming.py delete mode 100644 src/mistralai/utils/forms.py delete mode 100644 src/mistralai/utils/headers.py delete mode 100644 src/mistralai/utils/logger.py delete mode 100644 src/mistralai/utils/metadata.py delete mode 100644 src/mistralai/utils/queryparams.py delete mode 100644 src/mistralai/utils/requestbodies.py delete mode 100644 src/mistralai/utils/retries.py delete mode 100644 src/mistralai/utils/security.py delete mode 100644 src/mistralai/utils/serializers.py delete mode 100644 src/mistralai/utils/unmarshal_json_response.py delete mode 100644 src/mistralai/utils/url.py delete mode 100644 src/mistralai/utils/values.py diff 
--git a/src/mistralai/__init__.py b/src/mistralai/__init__.py deleted file mode 100644 index dd02e42e..00000000 --- a/src/mistralai/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) -from .sdk import * -from .sdkconfiguration import * -from .models import * - - -VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/src/mistralai/_hooks/__init__.py b/src/mistralai/_hooks/__init__.py deleted file mode 100644 index 2ee66cdd..00000000 --- a/src/mistralai/_hooks/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .sdkhooks import * -from .types import * -from .registration import * diff --git a/src/mistralai/_hooks/sdkhooks.py b/src/mistralai/_hooks/sdkhooks.py deleted file mode 100644 index 1f9a9316..00000000 --- a/src/mistralai/_hooks/sdkhooks.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import httpx -from .types import ( - SDKInitHook, - BeforeRequestContext, - BeforeRequestHook, - AfterSuccessContext, - AfterSuccessHook, - AfterErrorContext, - AfterErrorHook, - Hooks, -) -from .registration import init_hooks -from typing import List, Optional, Tuple -from mistralai.httpclient import HttpClient - - -class SDKHooks(Hooks): - def __init__(self) -> None: - self.sdk_init_hooks: List[SDKInitHook] = [] - self.before_request_hooks: List[BeforeRequestHook] = [] - self.after_success_hooks: List[AfterSuccessHook] = [] - self.after_error_hooks: List[AfterErrorHook] = [] - init_hooks(self) - - def register_sdk_init_hook(self, hook: SDKInitHook) -> None: - self.sdk_init_hooks.append(hook) - - def register_before_request_hook(self, hook: BeforeRequestHook) -> None: - self.before_request_hooks.append(hook) - - def register_after_success_hook(self, hook: AfterSuccessHook) -> None: - self.after_success_hooks.append(hook) - - def register_after_error_hook(self, hook: AfterErrorHook) -> None: - self.after_error_hooks.append(hook) - - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - for hook in self.sdk_init_hooks: - base_url, client = hook.sdk_init(base_url, client) - return base_url, client - - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> httpx.Request: - for hook in self.before_request_hooks: - out = hook.before_request(hook_ctx, request) - if isinstance(out, Exception): - raise out - request = out - - return request - - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> httpx.Response: - for hook in self.after_success_hooks: - out = hook.after_success(hook_ctx, response) - if isinstance(out, Exception): - raise out - response = out - return response - - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: - for hook in self.after_error_hooks: - result = hook.after_error(hook_ctx, response, error) - if isinstance(result, Exception): - raise result - response, error = result - return response, error diff --git 
a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py deleted file mode 100644 index 6d0f3e11..00000000 --- a/src/mistralai/_hooks/types.py +++ /dev/null @@ -1,113 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from abc import ABC, abstractmethod -import httpx -from mistralai.httpclient import HttpClient -from mistralai.sdkconfiguration import SDKConfiguration -from typing import Any, Callable, List, Optional, Tuple, Union - - -class HookContext: - config: SDKConfiguration - base_url: str - operation_id: str - oauth2_scopes: Optional[List[str]] = None - security_source: Optional[Union[Any, Callable[[], Any]]] = None - - def __init__( - self, - config: SDKConfiguration, - base_url: str, - operation_id: str, - oauth2_scopes: Optional[List[str]], - security_source: Optional[Union[Any, Callable[[], Any]]], - ): - self.config = config - self.base_url = base_url - self.operation_id = operation_id - self.oauth2_scopes = oauth2_scopes - self.security_source = security_source - - -class BeforeRequestContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterSuccessContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterErrorContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class SDKInitHook(ABC): - @abstractmethod - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - pass - - -class BeforeRequestHook(ABC): - @abstractmethod - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - pass - - -class AfterSuccessHook(ABC): - @abstractmethod - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> Union[httpx.Response, Exception]: - pass - - -class AfterErrorHook(ABC): - @abstractmethod - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: - pass - - -class Hooks(ABC): - @abstractmethod - def register_sdk_init_hook(self, hook: SDKInitHook): - pass - - @abstractmethod - def register_before_request_hook(self, hook: BeforeRequestHook): - pass - - @abstractmethod - def register_after_success_hook(self, hook: AfterSuccessHook): - pass - - @abstractmethod - def register_after_error_hook(self, hook: AfterErrorHook): - pass diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py deleted file mode 100644 index 6ee91593..00000000 --- a/src/mistralai/_version.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import importlib.metadata - -__title__: str = "mistralai" -__version__: str = "1.12.0" -__openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.12.0 2.794.1 1.0.0 mistralai" - -try: - if __package__ is not None: - __version__ = importlib.metadata.version(__package__) -except importlib.metadata.PackageNotFoundError: - pass diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py deleted file mode 100644 index be02ee5b..00000000 --- a/src/mistralai/accesses.py +++ /dev/null @@ -1,619 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - entitytype as models_entitytype, - shareenum as models_shareenum, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional - - -class Accesses(BaseSDK): - r"""(beta) Libraries API - manage access to a library.""" - - def list( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListSharingOut: - r"""List all of the access to this library. - - Given a library, list all of the entities that have access and at what level. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareListV1Request( - library_id=library_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListSharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListSharingOut: - r"""List all of the access to this library. - - Given a library, list all of the entities that have access and at what level. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareListV1Request( - library_id=library_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListSharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res)
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareListV1Request( - library_id=library_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListSharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update_or_create( - self, - *, - library_id: str, - level: models_shareenum.ShareEnum, - share_with_uuid: str, - share_with_type: models_entitytype.EntityType, - org_id: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: - r"""Create or update an access level. - - Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. - - :param library_id: - :param level: - :param share_with_uuid: The id of the entity (user, workspace or organization) to share with - :param share_with_type: The type of entity, used to share a library. - :param org_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareCreateV1Request( - library_id=library_id, - sharing_in=models.SharingIn( - org_id=org_id, - level=level, - share_with_uuid=share_with_uuid, - share_with_type=share_with_type, - ), - ) - - req = self._build_request( - method="PUT", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_in, False, False, "json", models.SharingIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_create_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_or_create_async( - self, - *, - library_id: str, - level: models_shareenum.ShareEnum, - share_with_uuid: str, - share_with_type: models_entitytype.EntityType, - org_id: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: - r"""Create or update an access level. - - Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. - - :param library_id: - :param level: - :param share_with_uuid: The id of the entity (user, workspace or organization) to share with - :param share_with_type: The type of entity, used to share a library. 
- :param org_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareCreateV1Request( - library_id=library_id, - sharing_in=models.SharingIn( - org_id=org_id, - level=level, - share_with_uuid=share_with_uuid, - share_with_type=share_with_type, - ), - ) - - req = self._build_request_async( - method="PUT", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_in, False, False, "json", models.SharingIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_create_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res)
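A matching sketch for `update_or_create`, mirroring the signature above; the angle-bracketed values are placeholders, and the `level`/`share_with_type` arguments stand in for members of `models.ShareEnum` and `models.EntityType`:

    # Grant or change an entity's access level on a library (reusing the
    # `client` from the earlier sketch).
    shared = client.beta.libraries.accesses.update_or_create(
        library_id="<library-id>",
        level="<share-level>",            # placeholder for a models.ShareEnum value
        share_with_uuid="<entity-uuid>",
        share_with_type="<entity-type>",  # placeholder for a models.EntityType value
    )
    print(shared)  # models.SharingOut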
- - def delete( - self, - *, - library_id: str, - share_with_uuid: str, - share_with_type: models_entitytype.EntityType, - org_id: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: - r"""Delete an access level. - - Given a library id, you can delete the access level of an entity. An owner cannot delete its own access. You have to be the owner of the library to delete an access other than yours.
- - :param library_id: - :param share_with_uuid: The id of the entity (user, workspace or organization) to share with - :param share_with_type: The type of entity, used to share a library. - :param org_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareDeleteV1Request( - library_id=library_id, - sharing_delete=models.SharingDelete( - org_id=org_id, - share_with_uuid=share_with_uuid, - share_with_type=share_with_type, - ), - ) - - req = self._build_request( - method="DELETE", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_delete, False, False, "json", models.SharingDelete - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - library_id: str, - share_with_uuid: str, - share_with_type: models_entitytype.EntityType, - org_id: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: - r"""Delete an access level. - - Given a library id, you can delete the access level of an entity. An owner cannot delete its own access.
You have to be the owner of the library to delete an access other than yours. - - :param library_id: - :param share_with_uuid: The id of the entity (user, workspace or organization) to share with - :param share_with_type: The type of entity, used to share a library. - :param org_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareDeleteV1Request( - library_id=library_id, - sharing_delete=models.SharingDelete( - org_id=org_id, - share_with_uuid=share_with_uuid, - share_with_type=share_with_type, - ), - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_delete, False, False, "json", models.SharingDelete - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res)
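Every method in this file also accepted a per-call `retries` override. A sketch using the Speakeasy-generated `BackoffStrategy`/`RetryConfig` helpers from `mistralai.utils`; the positional arguments (initial interval, max interval, exponent, max elapsed time, all in milliseconds) follow the usual generated signature, but treat the exact constructor as an assumption:

    from mistralai.utils import BackoffStrategy, RetryConfig

    # Retry on the 429/5xx statuses wired into retry_config above;
    # connection errors are not retried (final False).
    client.beta.libraries.accesses.delete(
        library_id="<library-id>",
        share_with_uuid="<entity-uuid>",
        share_with_type="<entity-type>",  # placeholder for a models.EntityType value
        retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False),
    )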
diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py deleted file mode 100644 index 73e4ee3c..00000000 --- a/src/mistralai/agents.py +++ /dev/null @@ -1,725 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - agentscompletionrequest as models_agentscompletionrequest, - agentscompletionstreamrequest as models_agentscompletionstreamrequest, - mistralpromptmode as models_mistralpromptmode, - prediction as models_prediction, - responseformat as models_responseformat, - tool as models_tool, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - - -class Agents(BaseSDK): - r"""Agents API.""" - - def complete( - self, - *, - messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessages], - List[ - models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict - ], - ], - agent_id: str, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_agentscompletionrequest.AgentsCompletionRequestStop, - models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_agentscompletionrequest.AgentsCompletionRequestToolChoice, - models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Agents Completion - - :param messages: The prompt(s) to generate completions for, encoded as a list of dicts with role and content. - :param agent_id: The ID of the agent to use for this completion. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`.
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: - :param tool_choice: - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request; input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsCompletionRequest( - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.AgentsCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - agent_id=agent_id, - ) - - req = self._build_request( - method="POST", - path="/v1/agents/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentsCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res)
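A minimal call sketch for the `complete` method deleted above, following its keyword-only signature; the agent id is a placeholder, and plain dicts are accepted for `messages` via the TypedDict variants in the signature:

    # Reusing the `client` from the earlier sketches.
    res = client.agents.complete(
        agent_id="<agent-id>",
        messages=[{"role": "user", "content": "Say hello."}],
    )
    if res.choices:
        # content may be a string or a list of content chunks
        print(res.choices[0].message.content)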
utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def complete_async( - self, - *, - messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessages], - List[ - models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict - ], - ], - agent_id: str, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_agentscompletionrequest.AgentsCompletionRequestStop, - models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_agentscompletionrequest.AgentsCompletionRequestToolChoice, - models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Agents Completion - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param agent_id: The ID of the agent to use for this completion. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: - :param tool_choice: - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
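For orientation, a minimal usage sketch of the synchronous `complete` and asynchronous `complete_async` methods above. The agent id, prompt, and environment variable are placeholders, not values from this patch:

```python
import asyncio
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Synchronous call to POST /v1/agents/completions (placeholder agent id).
res = client.agents.complete(
    agent_id="ag-your-agent-id",
    messages=[{"role": "user", "content": "Summarize our last release."}],
    max_tokens=256,
)
print(res.choices[0].message.content)


# The async variant takes the same keyword arguments.
async def main() -> None:
    res = await client.agents.complete_async(
        agent_id="ag-your-agent-id",
        messages=[{"role": "user", "content": "Summarize our last release."}],
    )
    print(res.choices[0].message.content)


asyncio.run(main())
```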
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsCompletionRequest( - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.AgentsCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - agent_id=agent_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/agents/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentsCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def stream( - self, - *, - messages: Union[ - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages - ], - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict - ], - ], - agent_id: str, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, - 
models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.CompletionEvent]: - r"""Stream Agents completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param agent_id: The ID of the agent to use for this completion. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: - :param tool_choice: - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
- :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsCompletionStreamRequest( - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - agent_id=agent_id, - ) - - req = self._build_request( - method="POST", - path="/v1/agents/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentsCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_agents", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise 
models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def stream_async( - self, - *, - messages: Union[ - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages - ], - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict - ], - ], - agent_id: str, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - r"""Stream Agents completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param agent_id: The ID of the agent to use for this completion. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
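`stream` returns an `eventstreaming.EventStream` of `CompletionEvent`s, closed out by the `[DONE]` sentinel handled above. A consumption sketch; the per-event `data.choices[0].delta.content` shape is assumed from the regular chat chunk format rather than shown in this patch:

```python
from mistralai import Mistral

client = Mistral(api_key="...")

res = client.agents.stream(
    agent_id="ag-your-agent-id",  # placeholder
    messages=[{"role": "user", "content": "Write a haiku about patch files."}],
)

# Using the stream as a context manager ensures the HTTP response is closed.
with res as event_stream:
    for event in event_stream:
        print(event.data.choices[0].delta.content or "", end="", flush=True)
```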
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: - :param tool_choice: - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsCompletionStreamRequest( - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - agent_id=agent_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/agents/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentsCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config 
= None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_agents", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py deleted file mode 100644 index f9522a28..00000000 --- a/src/mistralai/async_client.py +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Optional - -from .client import MIGRATION_MESSAGE - - -class MistralAsyncClient: - def __init__( - self, - api_key: Optional[str] = None, - endpoint: str = "", - max_retries: int = 5, - timeout: int = 120, - max_concurrent_requests: int = 64, - ): - raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py deleted file mode 100644 index 3de29053..00000000 --- a/src/mistralai/audio.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
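Note that the deleted `MistralAsyncClient` shim above only raises a migration error; async streaming now goes through `stream_async`, which yields an `EventStreamAsync` consumed with `async for`. A sketch under the same assumptions as the synchronous example:

```python
import asyncio

from mistralai import Mistral


async def main() -> None:
    client = Mistral(api_key="...")
    res = await client.agents.stream_async(
        agent_id="ag-your-agent-id",  # placeholder
        messages=[{"role": "user", "content": "Stream me a sentence."}],
    )
    # EventStreamAsync supports the async context-manager and iterator protocols.
    async with res as event_stream:
        async for event in event_stream:
            print(event.data.choices[0].delta.content or "", end="", flush=True)


asyncio.run(main())
```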
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.transcriptions import Transcriptions -from typing import Optional - -# region imports -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from mistralai.extra.realtime import RealtimeTranscription -# endregion imports - - -class Audio(BaseSDK): - transcriptions: Transcriptions - r"""API for audio transcription.""" - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.transcriptions = Transcriptions( - self.sdk_configuration, parent_ref=self.parent_ref - ) - - # region sdk-class-body - @property - def realtime(self) -> "RealtimeTranscription": - """Returns a client for real-time audio transcription via WebSocket.""" - if not hasattr(self, "_realtime"): - from mistralai.extra.realtime import RealtimeTranscription # pylint: disable=import-outside-toplevel - - self._realtime = RealtimeTranscription(self.sdk_configuration) # pylint: disable=attribute-defined-outside-init - - return self._realtime - - # endregion sdk-class-body diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py deleted file mode 100644 index c9a32aa1..00000000 --- a/src/mistralai/basesdk.py +++ /dev/null @@ -1,370 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .sdkconfiguration import SDKConfiguration -import httpx -from mistralai import models, utils -from mistralai._hooks import ( - AfterErrorContext, - AfterSuccessContext, - BeforeRequestContext, -) -from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing import Callable, List, Mapping, Optional, Tuple -from urllib.parse import parse_qs, urlparse - - -class BaseSDK: - sdk_configuration: SDKConfiguration - parent_ref: Optional[object] = None - """ - Reference to the root SDK instance, if any. This will prevent it from - being garbage collected while there are active streams. 
- """ - - def __init__( - self, - sdk_config: SDKConfiguration, - parent_ref: Optional[object] = None, - ) -> None: - self.sdk_configuration = sdk_config - self.parent_ref = parent_ref - - def _get_url(self, base_url, url_variables): - sdk_url, sdk_variables = self.sdk_configuration.get_server_details() - - if base_url is None: - base_url = sdk_url - - if url_variables is None: - url_variables = sdk_variables - - return utils.template_url(base_url, url_variables) - - def _build_request_async( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - allow_empty_value: Optional[List[str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.async_client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - allow_empty_value, - ) - - def _build_request( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - allow_empty_value: Optional[List[str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - allow_empty_value, - ) - - def _build_request_with_client( - self, - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - allow_empty_value: Optional[List[str]] = None, - ) -> httpx.Request: - query_params = {} - - url = url_override - if url is None: - url = utils.generate_url( - self._get_url(base_url, url_variables), - path, - request if request_has_path_params else None, - _globals if request_has_path_params else None, - ) - - query_params = utils.get_query_params( - request if request_has_query_params else None, - _globals if request_has_query_params else None, - allow_empty_value, - ) - else: - # Pick up the query parameter from the override so they can be - # preserved when building the request later on (necessary as of - # httpx 0.28). 
- parsed_override = urlparse(str(url_override)) - query_params = parse_qs(parsed_override.query, keep_blank_values=True) - - headers = utils.get_headers(request, _globals) - headers["Accept"] = accept_header_value - headers[user_agent_header] = self.sdk_configuration.user_agent - - if security is not None: - if callable(security): - security = security() - security = utils.get_security_from_env(security, models.Security) - if security is not None: - security_headers, security_query_params = utils.get_security(security) - headers = {**headers, **security_headers} - query_params = {**query_params, **security_query_params} - - serialized_request_body = SerializedRequestBody() - if get_serialized_body is not None: - rb = get_serialized_body() - if request_body_required and rb is None: - raise ValueError("request body is required") - - if rb is not None: - serialized_request_body = rb - - if ( - serialized_request_body.media_type is not None - and serialized_request_body.media_type - not in ( - "multipart/form-data", - "multipart/mixed", - ) - ): - headers["content-type"] = serialized_request_body.media_type - - if http_headers is not None: - for header, value in http_headers.items(): - headers[header] = value - - timeout = timeout_ms / 1000 if timeout_ms is not None else None - - return client.build_request( - method, - url, - params=query_params, - content=serialized_request_body.content, - data=serialized_request_body.data, - files=serialized_request_body.files, - headers=headers, - timeout=timeout, - ) - - def do_request( - self, - hook_ctx, - request, - error_status_codes, - stream=False, - retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.client - logger = self.sdk_configuration.debug_logger - - hooks = self.sdk_configuration.__dict__["_hooks"] - - def do(): - http_res = None - try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = client.send(req, stream=stream) - except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) - - return http_res - - if retry_config is not None: - http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) - else: - http_res = do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) - - return http_res - - async def do_request_async( - self, - hook_ctx, - request, - error_status_codes, - stream=False, - retry_config: 
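The `url_override` branch above re-extracts the query string with `urlparse`/`parse_qs` so it can be handed back to `client.build_request` via `params=` (per the comment, httpx 0.28 no longer preserves it otherwise). A stdlib-only illustration of what that extraction produces; the URL is made up:

```python
from urllib.parse import parse_qs, urlparse

url_override = "https://api.mistral.ai/v1/files?page=2&page_size=50&search="
parsed_override = urlparse(url_override)
query_params = parse_qs(parsed_override.query, keep_blank_values=True)
print(query_params)
# {'page': ['2'], 'page_size': ['50'], 'search': ['']}
# keep_blank_values=True is what retains 'search' despite its empty value.
```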
Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.async_client - logger = self.sdk_configuration.debug_logger - - hooks = self.sdk_configuration.__dict__["_hooks"] - - async def do(): - http_res = None - try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = await client.send(req, stream=stream) - except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) - - return http_res - - if retry_config is not None: - http_res = await utils.retry_async( - do, utils.Retries(retry_config[0], retry_config[1]) - ) - else: - http_res = await do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) - - return http_res diff --git a/src/mistralai/batch.py b/src/mistralai/batch.py deleted file mode 100644 index 7ed7ccef..00000000 --- a/src/mistralai/batch.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.mistral_jobs import MistralJobs -from typing import Optional - - -class Batch(BaseSDK): - jobs: MistralJobs - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.jobs = MistralJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py deleted file mode 100644 index 4bbf1fa3..00000000 --- a/src/mistralai/beta.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.conversations import Conversations -from mistralai.libraries import Libraries -from mistralai.mistral_agents import MistralAgents -from typing import Optional - - -class Beta(BaseSDK): - conversations: Conversations - r"""(beta) Conversations API""" - agents: MistralAgents - r"""(beta) Agents API""" - libraries: Libraries - r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.conversations = Conversations( - self.sdk_configuration, parent_ref=self.parent_ref - ) - self.agents = MistralAgents(self.sdk_configuration, parent_ref=self.parent_ref) - self.libraries = Libraries(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py deleted file mode 100644 index 1528c4c9..00000000 --- a/src/mistralai/chat.py +++ /dev/null @@ -1,835 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - chatcompletionrequest as models_chatcompletionrequest, - chatcompletionstreamrequest as models_chatcompletionstreamrequest, - mistralpromptmode as models_mistralpromptmode, - prediction as models_prediction, - responseformat as models_responseformat, - tool as models_tool, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - -# region imports -from typing import Type -from mistralai.extra import ( - convert_to_parsed_chat_completion_response, - response_format_from_pydantic_model, - CustomPydanticModel, - ParsedChatCompletionResponse, -) -# endregion imports - - -class Chat(BaseSDK): - r"""Chat Completion API.""" - - # region sdk-class-body - # Custom .parse methods for the Structure Outputs Feature. - - def parse( - self, response_format: Type[CustomPydanticModel], **kwargs: Any - ) -> ParsedChatCompletionResponse[CustomPydanticModel]: - """ - Parse the response using the provided response format. - :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .complete method - :return: The parsed response - """ - # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete - json_response_format = response_format_from_pydantic_model(response_format) - # Run the inference - response = self.complete(**kwargs, response_format=json_response_format) - # Parse response back to the input pydantic model - parsed_response = convert_to_parsed_chat_completion_response( - response, response_format - ) - return parsed_response - - async def parse_async( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> ParsedChatCompletionResponse[CustomPydanticModel]: - """ - Asynchronously parse the response using the provided response format. 
- :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .complete method - :return: The parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = await self.complete_async( # pylint: disable=E1125 - **kwargs, response_format=json_response_format - ) - parsed_response = convert_to_parsed_chat_completion_response( - response, response_format - ) - return parsed_response - - def parse_stream( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> eventstreaming.EventStream[models.CompletionEvent]: - """ - Parse the response using the provided response format. - For now the response will be in JSON format not in the input Pydantic model. - :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .stream method - :return: The JSON parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = self.stream(**kwargs, response_format=json_response_format) - return response - - async def parse_stream_async( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - """ - Asynchronously parse the response using the provided response format. - For now the response will be in JSON format not in the input Pydantic model. - :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .stream method - :return: The JSON parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = await self.stream_async( # pylint: disable=E1125 - **kwargs, response_format=json_response_format - ) - return response - - # endregion sdk-class-body - - def complete( - self, - *, - model: str, - messages: Union[ - List[models_chatcompletionrequest.Messages], - List[models_chatcompletionrequest.MessagesTypedDict], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_chatcompletionrequest.Stop, - models_chatcompletionrequest.StopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_chatcompletionrequest.ChatCompletionRequestToolChoice, - models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: 
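The four `parse*` helpers above wrap `complete`/`stream` with a Pydantic round trip: the model class becomes a strict JSON schema on the way out, and (for the non-streaming variants) the reply is validated back into that class. A minimal sketch; the model id, message, and the `message.parsed` accessor are assumptions from the SDK's structured-outputs docs rather than from this patch:

```python
import os

from pydantic import BaseModel

from mistralai import Mistral


class Book(BaseModel):
    title: str
    author: str


client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.chat.parse(
    model="mistral-small-latest",  # placeholder model id
    messages=[{"role": "user", "content": "Name one famous novel and its author."}],
    response_format=Book,
)
print(res.choices[0].message.parsed)  # -> Book(title=..., author=...)
```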
Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Chat Completion - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. - :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request( - method="POST", - path="/v1/chat/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return 
unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def complete_async( - self, - *, - model: str, - messages: Union[ - List[models_chatcompletionrequest.Messages], - List[models_chatcompletionrequest.MessagesTypedDict], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_chatcompletionrequest.Stop, - models_chatcompletionrequest.StopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_chatcompletionrequest.ChatCompletionRequestToolChoice, - models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Chat Completion - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
- :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. - :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
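Pulling the parameters above together, a sketch of a plain `complete` call with a low temperature and JSON mode; as the docstring warns, `json_object` mode still requires asking for JSON in a system or user message. The model id is a placeholder:

```python
import json
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.chat.complete(
    model="mistral-small-latest",  # placeholder model id
    temperature=0.2,  # tune this or top_p, not both
    response_format={"type": "json_object"},
    # prompt_mode="reasoning" would instead enable the reasoning system prompt.
    messages=[
        {"role": "system", "content": "Reply with a JSON object only."},
        {
            "role": "user",
            "content": "Return a JSON object with keys 'city' and 'country' for Paris.",
        },
    ],
)
print(json.loads(res.choices[0].message.content))
```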
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def stream( - self, - *, - model: str, - messages: Union[ - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages - ], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict - ], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - 
Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.CompletionEvent]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dicts with role and content. - :param temperature: What sampling temperature to use; we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON.
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. - :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request; input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel. - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
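As a sketch of consuming the returned event stream (model name and prompt are illustrative assumptions; chunks whose delta carries no text fall back to an empty string):

    import os

    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    with client.chat.stream(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Write a haiku about the sea."}],
    ) as event_stream:
        for event in event_stream:
            # Each CompletionEvent wraps a chunk; print tokens as they arrive.
            print(event.data.choices[0].delta.content or "", end="")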
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request( - method="POST", - path="/v1/chat/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def stream_async( - self, - *, - model: str, - messages: Union[ - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages - ], - List[ - 
models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict - ], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dicts with role and content. - :param temperature: What sampling temperature to use; we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
- :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. - :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request; input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel. - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
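The async variant is consumed with `async for`; a sketch under the same illustrative assumptions as above:

    import asyncio
    import os

    from mistralai import Mistral

    async def main() -> None:
        client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
        res = await client.chat.stream_async(
            model="mistral-small-latest",
            messages=[{"role": "user", "content": "Count to five."}],
        )
        async with res as event_stream:
            async for event in event_stream:
                print(event.data.choices[0].delta.content or "", end="")

    asyncio.run(main())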
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py deleted file mode 100644 index 7c32506e..00000000 --- 
a/src/mistralai/classifiers.py +++ /dev/null @@ -1,800 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - chatmoderationrequest as models_chatmoderationrequest, - classificationrequest as models_classificationrequest, - inputs as models_inputs, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, Mapping, Optional, Union - - -class Classifiers(BaseSDK): - r"""Classifiers API.""" - - def moderate( - self, - *, - model: str, - inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModerationResponse: - r"""Moderations - - :param model: ID of the model to use. - :param inputs: Text to classify. - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ClassificationRequest( - model=model, - metadata=metadata, - inputs=inputs, - ) - - req = self._build_request( - method="POST", - path="/v1/moderations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="moderations_v1_moderations_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModerationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise 
models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def moderate_async( - self, - *, - model: str, - inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModerationResponse: - r"""Moderations - - :param model: ID of the model to use. - :param inputs: Text to classify. - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ClassificationRequest( - model=model, - metadata=metadata, - inputs=inputs, - ) - - req = self._build_request_async( - method="POST", - path="/v1/moderations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="moderations_v1_moderations_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModerationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def moderate_chat( - self, - *, - inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs, - models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, - ], - model: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModerationResponse: - r"""Chat Moderations - - :param inputs: Chat to classify - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), - model=model, - ) - - req = self._build_request( - method="POST", - path="/v1/chat/moderations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatModerationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModerationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def moderate_chat_async( - self, - *, - inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs, - 
models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, - ], - model: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModerationResponse: - r"""Chat Moderations - - :param inputs: Chat to classify - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), - model=model, - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/moderations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatModerationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModerationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def classify( - self, - *, - model: str, - inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = 
None, - ) -> models.ClassificationResponse: - r"""Classifications - - :param model: ID of the model to use. - :param inputs: Text to classify. - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ClassificationRequest( - model=model, - metadata=metadata, - inputs=inputs, - ) - - req = self._build_request( - method="POST", - path="/v1/classifications", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="classifications_v1_classifications_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ClassificationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def classify_async( - self, - *, - model: str, - inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: - r"""Classifications - - :param model: ID of the model to use. - :param inputs: Text to classify. 
- :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ClassificationRequest( - model=model, - metadata=metadata, - inputs=inputs, - ) - - req = self._build_request_async( - method="POST", - path="/v1/classifications", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="classifications_v1_classifications_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ClassificationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def classify_chat( - self, - *, - model: str, - inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: - r"""Chat Classifications - - :param model: - :param inputs: Chat to classify - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
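A sketch of the synchronous call (the classifier model id and message payload are illustrative assumptions; the accepted `inputs` shapes are defined by the `Inputs` model):

    import os

    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    res = client.classifiers.classify_chat(
        model="my-classifier",  # assumed: a classifier model you have access to
        inputs={"messages": [{"role": "user", "content": "I love this product!"}]},
    )
    # ClassificationResponse carries one result entry per input.
    print(res.results)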
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatClassificationRequest( - model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), - ) - - req = self._build_request( - method="POST", - path="/v1/chat/classifications", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ClassificationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def classify_chat_async( - self, - *, - model: str, - inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: - r"""Chat Classifications - - :param model: - :param inputs: Chat to classify - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatClassificationRequest( - model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/classifications", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ClassificationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client.py b/src/mistralai/client.py deleted file mode 100644 index d3582f77..00000000 --- a/src/mistralai/client.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Optional - -MIGRATION_MESSAGE = "This client is deprecated. To migrate to the new client, please refer to this guide: https://github.com/mistralai/client-python/blob/main/MIGRATION.md. If you need to use this client anyway, pin your version to 0.4.2." - - -class MistralClient: - def __init__( - self, - api_key: Optional[str] = None, - endpoint: str = "", - max_retries: int = 5, - timeout: int = 120, - ): - raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py deleted file mode 100644 index 194cb4c0..00000000 --- a/src/mistralai/conversations.py +++ /dev/null @@ -1,2865 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - completionargs as models_completionargs, - conversationappendrequest as models_conversationappendrequest, - conversationappendstreamrequest as models_conversationappendstreamrequest, - conversationinputs as models_conversationinputs, - conversationrequest as models_conversationrequest, - conversationrestartrequest as models_conversationrestartrequest, - conversationrestartstreamrequest as models_conversationrestartstreamrequest, - conversationstreamrequest as models_conversationstreamrequest, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - -# region imports -import typing -from typing import AsyncGenerator -import logging -from collections import defaultdict - -from mistralai.models import ( - ResponseStartedEvent, - ConversationEventsData, - InputEntries, -) -from mistralai.extra.run.result import ( - RunResult, - RunResultEvents, - FunctionResultEvent, - reconstitue_entries, -) -from mistralai.extra.run.utils import run_requirements -from mistralai.extra.observability.otel import GenAISpanEnum, get_or_create_otel_tracer - -logger = logging.getLogger(__name__) -tracing_enabled, tracer = get_or_create_otel_tracer() - -if typing.TYPE_CHECKING: - from mistralai.extra.run.context import RunContext - -# endregion imports - - -class Conversations(BaseSDK): - r"""(beta) Conversations API""" - - # region sdk-class-body - # Custom run code allowing client side execution of code - - @run_requirements - async def run_async( - self, - run_ctx: "RunContext", - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> RunResult: - """Run a conversation with the given inputs and context. 
- - The execution of a run will only stop when no required local execution can be done.""" - from mistralai.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls - - with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): - req, run_result, input_entries = await _validate_run( - beta_client=Beta(self.sdk_configuration), - run_ctx=run_ctx, - inputs=inputs, - instructions=instructions, - tools=tools, - completion_args=completion_args, - ) - - with tracer.start_as_current_span(GenAISpanEnum.CONVERSATION.value): - while True: - if run_ctx.conversation_id is None: - res = await self.start_async( - inputs=input_entries, - http_headers=http_headers, - name=name, - description=description, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - **req, # type: ignore - ) - run_result.conversation_id = res.conversation_id - run_ctx.conversation_id = res.conversation_id - logger.info( - f"Started Run with conversation with id {res.conversation_id}" - ) - else: - res = await self.append_async( - conversation_id=run_ctx.conversation_id, - inputs=input_entries, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - ) - run_ctx.request_count += 1 - run_result.output_entries.extend(res.outputs) - fcalls = get_function_calls(res.outputs) - if not fcalls: - logger.debug("No more function calls to execute") - break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - input_entries = typing.cast(list[InputEntries], fresults) - return run_result - - @run_requirements - async def run_stream_async( - self, - run_ctx: "RunContext", - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: - """Similar to `run_async` but returns a generator which streams events. 
- - The last streamed object is the RunResult object which summarises what happened in the run.""" - from mistralai.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls - - req, run_result, input_entries = await _validate_run( - beta_client=Beta(self.sdk_configuration), - run_ctx=run_ctx, - inputs=inputs, - instructions=instructions, - tools=tools, - completion_args=completion_args, - ) - - async def run_generator() -> ( - AsyncGenerator[Union[RunResultEvents, RunResult], None] - ): - current_entries = input_entries - while True: - received_event_tracker: defaultdict[ - int, list[ConversationEventsData] - ] = defaultdict(list) - if run_ctx.conversation_id is None: - res = await self.start_stream_async( - inputs=current_entries, - http_headers=http_headers, - name=name, - description=description, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - **req, # type: ignore - ) - else: - res = await self.append_stream_async( - conversation_id=run_ctx.conversation_id, - inputs=current_entries, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - ) - async for event in res: - if ( - isinstance(event.data, ResponseStartedEvent) - and run_ctx.conversation_id is None - ): - run_result.conversation_id = event.data.conversation_id - run_ctx.conversation_id = event.data.conversation_id - logger.info( - f"Started Run with conversation with id {run_ctx.conversation_id}" - ) - if ( - output_index := getattr(event.data, "output_index", None) - ) is not None: - received_event_tracker[output_index].append(event.data) - yield typing.cast(RunResultEvents, event) - run_ctx.request_count += 1 - outputs = reconstitue_entries(received_event_tracker) - run_result.output_entries.extend(outputs) - fcalls = get_function_calls(outputs) - if not fcalls: - logger.debug("No more function calls to execute") - break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - for fresult in fresults: - yield RunResultEvents( - event="function.result", - data=FunctionResultEvent( - type="function.result", - result=fresult.result, - tool_call_id=fresult.tool_call_id, - ), - ) - current_entries = typing.cast(list[InputEntries], fresults) - yield run_result - - return run_generator() - - # endregion sdk-class-body - - def start( - self, - *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = False, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models_conversationrequest.HandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_conversationrequest.Tools], - List[models_conversationrequest.ToolsTypedDict], - ] - ] = None, - completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrequest.AgentVersion, - models_conversationrequest.AgentVersionTypedDict, - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - 
http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationResponse:
-        r"""Create a conversation and append entries to it.
-
-        Create a new conversation, using a base model or an agent, and append entries. Completion and tool executions are run, and the response is appended to the conversation. Use the returned conversation_id to continue the conversation.
-
-        :param inputs:
-        :param stream:
-        :param store:
-        :param handoff_execution:
-        :param instructions:
-        :param tools: List of tools which are available to the model during the conversation.
-        :param completion_args:
-        :param name:
-        :param description:
-        :param metadata:
-        :param agent_id:
-        :param agent_version:
-        :param model:
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-
-        request = models.ConversationRequest(
-            inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
-            stream=stream,
-            store=store,
-            handoff_execution=handoff_execution,
-            instructions=instructions,
-            tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]),
-            completion_args=utils.get_pydantic_model(
-                completion_args, OptionalNullable[models.CompletionArgs]
-            ),
-            name=name,
-            description=description,
-            metadata=metadata,
-            agent_id=agent_id,
-            agent_version=agent_version,
-            model=model,
-        )
-
-        req = self._build_request(
-            method="POST",
-            path="/v1/conversations",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=request,
-            request_body_required=True,
-            request_has_path_params=False,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value="application/json",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(
-                request, False, False, "json", models.ConversationRequest
-            ),
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
-        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = self.do_request(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="agents_api_v1_conversations_start",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
-            ),
-            request=req,
-            error_status_codes=["422", "4XX", "5XX"],
-            retry_config=retry_config,
-        )
-
-        response_data: Any = None
-        if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.ConversationResponse, http_res)
-        if utils.match_response(http_res, "422", "application/json"):
-            response_data = unmarshal_json_response(
-                models.HTTPValidationErrorData, http_res
-            )
-            raise models.HTTPValidationError(response_data, http_res)
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
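For orientation, a minimal usage sketch of the `start()` call shape shown above, assuming a configured client; the API key handling and agent ID are illustrative, and a plain string is assumed to be an accepted `ConversationInputs` value:

```python
import os
from mistralai import Mistral

# Hypothetical setup; assumes MISTRAL_API_KEY is set in the environment.
client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Start a conversation backed by an agent (agent_id is illustrative).
res = client.beta.conversations.start(
    agent_id="ag_0123456789ab",
    inputs="What is the capital of France?",
)

# Reuse the returned conversation_id to continue the conversation.
print(res.conversation_id)
```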
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-
-        raise models.SDKError("Unexpected response received", http_res)
-
-    async def start_async(
-        self,
-        *,
-        inputs: Union[
-            models_conversationinputs.ConversationInputs,
-            models_conversationinputs.ConversationInputsTypedDict,
-        ],
-        stream: Optional[bool] = False,
-        store: OptionalNullable[bool] = UNSET,
-        handoff_execution: OptionalNullable[
-            models_conversationrequest.HandoffExecution
-        ] = UNSET,
-        instructions: OptionalNullable[str] = UNSET,
-        tools: Optional[
-            Union[
-                List[models_conversationrequest.Tools],
-                List[models_conversationrequest.ToolsTypedDict],
-            ]
-        ] = None,
-        completion_args: OptionalNullable[
-            Union[
-                models_completionargs.CompletionArgs,
-                models_completionargs.CompletionArgsTypedDict,
-            ]
-        ] = UNSET,
-        name: OptionalNullable[str] = UNSET,
-        description: OptionalNullable[str] = UNSET,
-        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
-        agent_id: OptionalNullable[str] = UNSET,
-        agent_version: OptionalNullable[
-            Union[
-                models_conversationrequest.AgentVersion,
-                models_conversationrequest.AgentVersionTypedDict,
-            ]
-        ] = UNSET,
-        model: OptionalNullable[str] = UNSET,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationResponse:
-        r"""Create a conversation and append entries to it.
-
-        Create a new conversation, using a base model or an agent, and append entries. Completion and tool executions are run, and the response is appended to the conversation. Use the returned conversation_id to continue the conversation.
-
-        :param inputs:
-        :param stream:
-        :param store:
-        :param handoff_execution:
-        :param instructions:
-        :param tools: List of tools which are available to the model during the conversation.
-        :param completion_args:
-        :param name:
-        :param description:
-        :param metadata:
-        :param agent_id:
-        :param agent_version:
-        :param model:
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - metadata=metadata, - agent_id=agent_id, - agent_version=agent_version, - model=model, - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: - r"""List all created conversations. - - Retrieve a list of conversation entities sorted by creation time. 
- - :param page: - :param page_size: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsListRequest( - page=page, - page_size=page_size, - metadata=metadata, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.ResponseBody], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: - r"""List all created conversations. - - Retrieve a list of conversation entities sorted by creation time. - - :param page: - :param page_size: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsListRequest( - page=page, - page_size=page_size, - metadata=metadata, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.ResponseBody], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: - r"""Retrieve a conversation information. - - Given a conversation_id retrieve a conversation entity with its attributes. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsGetRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: - r"""Retrieve a conversation information. - - Given a conversation_id retrieve a conversation entity with its attributes. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsGetRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete a conversation. - - Delete a conversation given a conversation_id. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsDeleteRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete a conversation. - - Delete a conversation given a conversation_id. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsDeleteRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def append( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationappendrequest.ConversationAppendRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the new created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. 
- :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendRequest( - conversation_id=conversation_id, - conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_request, - False, - False, - "json", - models.ConversationAppendRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def append_async( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationappendrequest.ConversationAppendRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - 
models_completionargs.CompletionArgs,
-                models_completionargs.CompletionArgsTypedDict,
-            ]
-        ] = None,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationResponse:
-        r"""Append new entries to an existing conversation.
-
-        Run completion on the history of the conversation and the user entries. Return the newly created entries.
-
-        :param conversation_id: ID of the conversation to which we append entries.
-        :param inputs:
-        :param stream:
-        :param store: Whether to store the results on our servers or not.
-        :param handoff_execution:
-        :param completion_args: White-listed arguments from the completion API
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-
-        request = models.AgentsAPIV1ConversationsAppendRequest(
-            conversation_id=conversation_id,
-            conversation_append_request=models.ConversationAppendRequest(
-                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
-                stream=stream,
-                store=store,
-                handoff_execution=handoff_execution,
-                completion_args=utils.get_pydantic_model(
-                    completion_args, Optional[models.CompletionArgs]
-                ),
-            ),
-        )
-
-        req = self._build_request_async(
-            method="POST",
-            path="/v1/conversations/{conversation_id}",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=request,
-            request_body_required=True,
-            request_has_path_params=True,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value="application/json",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(
-                request.conversation_append_request,
-                False,
-                False,
-                "json",
-                models.ConversationAppendRequest,
-            ),
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
-        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = await self.do_request_async(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="agents_api_v1_conversations_append",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
-            ),
-            request=req,
-            error_status_codes=["422", "4XX", "5XX"],
-            retry_config=retry_config,
-        )
-
-        response_data: Any = None
-        if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.ConversationResponse, http_res)
-        if utils.match_response(http_res, "422", "application/json"):
-            response_data = unmarshal_json_response(
-                models.HTTPValidationErrorData, http_res
-            )
-            raise models.HTTPValidationError(response_data, http_res)
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-
-        raise models.SDKError("Unexpected response received", http_res)
-
-    def get_history(
-        self,
-        *,
-        conversation_id: str,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationHistory:
-        r"""Retrieve all entries in a conversation.
-
-        Given a conversation_id, retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended; these can be messages, connectors, or function_call entries.
-
-        :param conversation_id: ID of the conversation from which we are fetching entries.
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-
-        request = models.AgentsAPIV1ConversationsHistoryRequest(
-            conversation_id=conversation_id,
-        )
-
-        req = self._build_request(
-            method="GET",
-            path="/v1/conversations/{conversation_id}/history",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=request,
-            request_body_required=False,
-            request_has_path_params=True,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value="application/json",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
-        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = self.do_request(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="agents_api_v1_conversations_history",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
-            ),
-            request=req,
-            error_status_codes=["422", "4XX", "5XX"],
-            retry_config=retry_config,
-        )
-
-        response_data: Any = None
-        if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.ConversationHistory, http_res)
-        if utils.match_response(http_res, "422", "application/json"):
-            response_data = unmarshal_json_response(
-                models.HTTPValidationErrorData, http_res
-            )
-            raise models.HTTPValidationError(response_data, http_res)
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-
-        raise models.SDKError("Unexpected response received", http_res)
-
-    async def get_history_async(
-        self,
-        *,
-        conversation_id: str,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationHistory:
-        r"""Retrieve all entries in a conversation.
-
-        Given a conversation_id, retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended; these can be messages, connectors, or function_call entries.
-
-        :param conversation_id: ID of the conversation from which we are fetching entries.
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-
-        request = models.AgentsAPIV1ConversationsHistoryRequest(
-            conversation_id=conversation_id,
-        )
-
-        req = self._build_request_async(
-            method="GET",
-            path="/v1/conversations/{conversation_id}/history",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=request,
-            request_body_required=False,
-            request_has_path_params=True,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value="application/json",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
-        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = await self.do_request_async(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="agents_api_v1_conversations_history",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
-            ),
-            request=req,
-            error_status_codes=["422", "4XX", "5XX"],
-            retry_config=retry_config,
-        )
-
-        response_data: Any = None
-        if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.ConversationHistory, http_res)
-        if utils.match_response(http_res, "422", "application/json"):
-            response_data = unmarshal_json_response(
-                models.HTTPValidationErrorData, http_res
-            )
-            raise models.HTTPValidationError(response_data, http_res)
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = await utils.stream_to_text_async(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-
-        raise models.SDKError("Unexpected response received", http_res)
-
-    def get_messages(
-        self,
-        *,
-        conversation_id: str,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationMessages:
-        r"""Retrieve all messages in a conversation.
-
-        Given a conversation_id, retrieve all the messages belonging to that conversation. This is similar to retrieving all entries, except that only message entries are returned.
-
-        :param conversation_id: ID of the conversation from which we are fetching messages.
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-
-        request = models.AgentsAPIV1ConversationsMessagesRequest(
-            conversation_id=conversation_id,
-        )
-
-        req = self._build_request(
-            method="GET",
-            path="/v1/conversations/{conversation_id}/messages",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=request,
-            request_body_required=False,
-            request_has_path_params=True,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value="application/json",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
-        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = self.do_request(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="agents_api_v1_conversations_messages",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
-            ),
-            request=req,
-            error_status_codes=["422", "4XX", "5XX"],
-            retry_config=retry_config,
-        )
-
-        response_data: Any = None
-        if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.ConversationMessages, http_res)
-        if utils.match_response(http_res, "422", "application/json"):
-            response_data = unmarshal_json_response(
-                models.HTTPValidationErrorData, http_res
-            )
-            raise models.HTTPValidationError(response_data, http_res)
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-
-        raise models.SDKError("Unexpected response received", http_res)
-
-    async def get_messages_async(
-        self,
-        *,
-        conversation_id: str,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationMessages:
-        r"""Retrieve all messages in a conversation.
-
-        Given a conversation_id, retrieve all the messages belonging to that conversation. This is similar to retrieving all entries, except that only message entries are returned.
-
-        :param conversation_id: ID of the conversation from which we are fetching messages.
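A sketch of the two retrieval methods above; `get_history()` returns every entry type, while `get_messages()` returns only the message entries. The ID is illustrative:

```python
# All entries: messages, connectors, and function_call entries.
history = client.beta.conversations.get_history(
    conversation_id="conv_0123456789ab",  # illustrative ID
)

# Message entries only.
messages = client.beta.conversations.get_messages(
    conversation_id="conv_0123456789ab",
)
```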
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsMessagesRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations/{conversation_id}/messages", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationMessages, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def restart( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - from_entry_id: str, - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationrestartrequest.ConversationRestartRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrestartrequest.ConversationRestartRequestAgentVersion, - models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - 
http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationResponse:
-        r"""Restart a conversation starting from a given entry.
-
-        Given a conversation_id and an entry id, recreate the conversation from that point and run completion. A new conversation is returned, containing the newly created entries.
-
-        :param conversation_id: ID of the original conversation which is being restarted.
-        :param inputs:
-        :param from_entry_id:
-        :param stream:
-        :param store: Whether to store the results on our servers or not.
-        :param handoff_execution:
-        :param completion_args: White-listed arguments from the completion API
-        :param metadata: Custom metadata for the conversation.
-        :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version.
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-
-        request = models.AgentsAPIV1ConversationsRestartRequest(
-            conversation_id=conversation_id,
-            conversation_restart_request=models.ConversationRestartRequest(
-                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
-                stream=stream,
-                store=store,
-                handoff_execution=handoff_execution,
-                completion_args=utils.get_pydantic_model(
-                    completion_args, Optional[models.CompletionArgs]
-                ),
-                metadata=metadata,
-                from_entry_id=from_entry_id,
-                agent_version=agent_version,
-            ),
-        )
-
-        req = self._build_request(
-            method="POST",
-            path="/v1/conversations/{conversation_id}/restart",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=request,
-            request_body_required=True,
-            request_has_path_params=True,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value="application/json",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            get_serialized_body=lambda: utils.serialize_request_body(
-                request.conversation_restart_request,
-                False,
-                False,
-                "json",
-                models.ConversationRestartRequest,
-            ),
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
-        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = self.do_request(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="agents_api_v1_conversations_restart",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
-            ),
-            request=req,
-            error_status_codes=["422", "4XX", "5XX"],
-            retry_config=retry_config,
-        )
-
-        response_data: Any = None
-        if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.ConversationResponse, http_res)
-        if utils.match_response(http_res, "422", "application/json"):
-            response_data = unmarshal_json_response(
-                models.HTTPValidationErrorData, http_res
-            )
-            raise models.HTTPValidationError(response_data, http_res)
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-
-        raise models.SDKError("Unexpected response received", http_res)
-
-    async def restart_async(
-        self,
-        *,
-        conversation_id: str,
-        inputs: Union[
-            models_conversationinputs.ConversationInputs,
-            models_conversationinputs.ConversationInputsTypedDict,
-        ],
-        from_entry_id: str,
-        stream: Optional[bool] = False,
-        store: Optional[bool] = True,
-        handoff_execution: Optional[
-            models_conversationrestartrequest.ConversationRestartRequestHandoffExecution
-        ] = "server",
-        completion_args: Optional[
-            Union[
-                models_completionargs.CompletionArgs,
-                models_completionargs.CompletionArgsTypedDict,
-            ]
-        ] = None,
-        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
-        agent_version: OptionalNullable[
-            Union[
-                models_conversationrestartrequest.ConversationRestartRequestAgentVersion,
-                models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict,
-            ]
-        ] = UNSET,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ConversationResponse:
-        r"""Restart a conversation starting from a given entry.
-
-        Given a conversation_id and an entry id, recreate the conversation from that point and run completion. A new conversation is returned, containing the newly created entries.
-
-        :param conversation_id: ID of the original conversation which is being restarted.
-        :param inputs:
-        :param from_entry_id:
-        :param stream:
-        :param store: Whether to store the results on our servers or not.
-        :param handoff_execution:
-        :param completion_args: White-listed arguments from the completion API
-        :param metadata: Custom metadata for the conversation.
-        :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version.
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartRequest( - conversation_id=conversation_id, - conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - metadata=metadata, - from_entry_id=from_entry_id, - agent_version=agent_version, - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}/restart", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_request, - False, - False, - "json", - models.ConversationRestartRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def start_stream( - self, - *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = True, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models_conversationstreamrequest.ConversationStreamRequestHandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTools], - List[ - models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict - ], - ] - ] = None, - completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - 
models_completionargs.CompletionArgsTypedDict, - ] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationstreamrequest.ConversationStreamRequestAgentVersion, - models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent, and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. - - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: - :param name: - :param description: - :param metadata: - :param agent_id: - :param agent_version: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.ConversationStreamRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - metadata=metadata, - agent_id=agent_id, - agent_version=agent_version, - model=model, - ) - - req = self._build_request( - method="POST", - path="/v1/conversations#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=None, -
security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def start_stream_async( - self, - *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = True, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models_conversationstreamrequest.ConversationStreamRequestHandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTools], - List[ - models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict - ], - ] - ] = None, - completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationstreamrequest.ConversationStreamRequestAgentVersion, - models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent, and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. - - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: List of tools which are available to the model during the conversation.
- :param completion_args: - :param name: - :param description: - :param metadata: - :param agent_id: - :param agent_version: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.ConversationStreamRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - metadata=metadata, - agent_id=agent_id, - agent_version=agent_version, - model=model, - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", 
http_res, http_res_text) - - def append_stream( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the newly created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results on our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendStreamRequest( - conversation_id=conversation_id, - conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_stream_request, - False, - False, - "json", - models.ConversationAppendStreamRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - -
response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def append_stream_async( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the newly created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results on our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
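A minimal sketch of consuming the event stream returned by `append_stream` (the same pattern applies to `start_stream`). The conversation id is a placeholder and the plain-string `inputs` form is an assumption:

```python
import os
from mistralai import Mistral

mistral = Mistral(api_key=os.getenv("MISTRAL_API_KEY"))
res = mistral.beta.conversations.append_stream(
    conversation_id="conv_0123",  # placeholder id
    inputs="Summarize the discussion so far.",  # assumes plain-string inputs
)
with res as event_stream:  # closes the underlying HTTP response on exit
    for event in event_stream:  # each event unmarshals to models.ConversationEvents
        print(event)
```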
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendStreamRequest( - conversation_id=conversation_id, - conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_stream_request, - False, - False, - "json", - models.ConversationAppendStreamRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - def restart_stream( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - from_entry_id: str, - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - 
models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an entry id, recreate the conversation from that point and run completion. A new conversation is returned with the newly created entries. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results on our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param metadata: Custom metadata for the conversation. - :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartStreamRequest( - conversation_id=conversation_id, - conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - metadata=metadata, - from_entry_id=from_entry_id, - agent_version=agent_version, - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}/restart#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_stream_request, - False, - False, - "json", - models.ConversationRestartStreamRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=None, - security_source=get_security_from_env( -
self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def restart_stream_async( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - from_entry_id: str, - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an entry id, recreate the conversation from that point and run completion. A new conversation is returned with the newly created entries. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results on our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param metadata: Custom metadata for the conversation. - :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
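A streaming-restart sketch combining the two features above, `from_entry_id` and `agent_version`. Ids are placeholders and the integer version selector is an assumption:

```python
import os
from mistralai import Mistral

mistral = Mistral(api_key=os.getenv("MISTRAL_API_KEY"))
res = mistral.beta.conversations.restart_stream(
    conversation_id="conv_0123",   # placeholder ids throughout
    from_entry_id="entry_0456",
    inputs="Take a different approach this time.",
    agent_version=2,               # assumes an integer version selector
)
with res as event_stream:
    for event in event_stream:
        print(event)
```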
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartStreamRequest( - conversation_id=conversation_id, - conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - metadata=metadata, - from_entry_id=from_entry_id, - agent_version=agent_version, - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}/restart#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_stream_request, - False, - False, - "json", - models.ConversationRestartStreamRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py deleted file mode 100644 index fac58fdb..00000000 --- a/src/mistralai/documents.py +++ /dev/null @@ -1,1981 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - documentupdatein as models_documentupdatein, - file as models_file, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, Mapping, Optional, Union - - -class Documents(BaseSDK): - r"""(beta) Libraries API - manage documents in a library.""" - - def list( - self, - *, - library_id: str, - search: OptionalNullable[str] = UNSET, - page_size: Optional[int] = 100, - page: Optional[int] = 0, - filters_attributes: OptionalNullable[str] = UNSET, - sort_by: Optional[str] = "created_at", - sort_order: Optional[str] = "desc", - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: - r"""List documents in a given library. - - Given a library, lists the document that have been uploaded to that library. - - :param library_id: - :param search: - :param page_size: - :param page: - :param filters_attributes: - :param sort_by: - :param sort_order: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsListV1Request( - library_id=library_id, - search=search, - page_size=page_size, - page=page, - filters_attributes=filters_attributes, - sort_by=sort_by, - sort_order=sort_order, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListDocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise 
models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - library_id: str, - search: OptionalNullable[str] = UNSET, - page_size: Optional[int] = 100, - page: Optional[int] = 0, - filters_attributes: OptionalNullable[str] = UNSET, - sort_by: Optional[str] = "created_at", - sort_order: Optional[str] = "desc", - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: - r"""List documents in a given library. - - Given a library, lists the documents that have been uploaded to that library. - - :param library_id: - :param search: - :param page_size: - :param page: - :param filters_attributes: - :param sort_by: - :param sort_order: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsListV1Request( - library_id=library_id, - search=search, - page_size=page_size, - page=page, - filters_attributes=filters_attributes, - sort_by=sort_by, - sort_order=sort_order, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListDocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): -
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def upload( - self, - *, - library_id: str, - file: Union[models_file.File, models_file.FileTypedDict], - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Upload a new document. - - Given a library, upload a new document to that library. It is queued for processing; its status will change once it has been processed. Processing has to be completed before the document becomes discoverable in the library search. - - :param library_id: - :param file: The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsUploadV1Request( - library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( - file=utils.get_pydantic_model(file, models.File), - ), - ) - - req = self._build_request( - method="POST", - path="/v1/libraries/{library_id}/documents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_upload_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, ["200", "201"], "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422",
"application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def upload_async( - self, - *, - library_id: str, - file: Union[models_file.File, models_file.FileTypedDict], - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Upload a new document. - - Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search - - :param library_id: - :param file: The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsUploadV1Request( - library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( - file=utils.get_pydantic_model(file, models.File), - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/libraries/{library_id}/documents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_upload_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, ["200", "201"], "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Retrieve the metadata of a specific document. - - Given a library and a document in this library, you can retrieve the metadata of that document. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Retrieve the metadata of a specific document. - - Given a library and a document in this library, you can retrieve the metadata of that document. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update( - self, - *, - library_id: str, - document_id: str, - name: OptionalNullable[str] = UNSET, - attributes: OptionalNullable[ - Union[ - Dict[str, models_documentupdatein.Attributes], - Dict[str, models_documentupdatein.AttributesTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Update the metadata of a specific document. - - Given a library and a document in that library, update the name of that document. - - :param library_id: - :param document_id: - :param name: - :param attributes: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsUpdateV1Request( - library_id=library_id, - document_id=document_id, - document_update_in=models.DocumentUpdateIn( - name=name, - attributes=attributes, - ), - ) - - req = self._build_request( - method="PUT", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, - False, - False, - "json", - models.DocumentUpdateIn, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_update_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_async( - self, - *, - library_id: str, - document_id: str, - name: OptionalNullable[str] = UNSET, - attributes: OptionalNullable[ - Union[ - Dict[str, models_documentupdatein.Attributes], - Dict[str, models_documentupdatein.AttributesTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Update the metadata of a specific document. - - Given a library and a document in that library, update the name of that document. - - :param library_id: - :param document_id: - :param name: - :param attributes: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsUpdateV1Request( - library_id=library_id, - document_id=document_id, - document_update_in=models.DocumentUpdateIn( - name=name, - attributes=attributes, - ), - ) - - req = self._build_request_async( - method="PUT", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, - False, - False, - "json", - models.DocumentUpdateIn, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_update_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete a document. - - Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsDeleteV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete a document. - - Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsDeleteV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def text_content( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentTextContent: - r"""Retrieve the text content of a specific document. - - Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetTextContentV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/text_content", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentTextContent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def text_content_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentTextContent: - r"""Retrieve the text content of a specific document. - - Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetTextContentV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/text_content", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentTextContent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def status( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ProcessingStatusOut: - r"""Retrieve the processing status of a specific document. - - Given a library and a document in that library, retrieve the processing status of that document. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetStatusV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/status", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_status_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ProcessingStatusOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def status_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ProcessingStatusOut: - r"""Retrieve the processing status of a specific document. - - Given a library and a document in that library, retrieve the processing status of that document. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetStatusV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/status", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_status_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ProcessingStatusOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get_signed_url( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> str: - r"""Retrieve the signed URL of a specific document. - - Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetSignedURLV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(str, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_signed_url_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> str: - r"""Retrieve the signed URL of a specific document. - - Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetSignedURLV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(str, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def extracted_text_signed_url( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> str: - r"""Retrieve the signed URL of text extracted from a given document. - - Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(str, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def extracted_text_signed_url_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> str: - r"""Retrieve the signed URL of text extracted from a given document. - - Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(str, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def reprocess( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Reprocess a document. - - Given a library and a document in that library, reprocess that document, it will be billed again. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsReprocessV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="POST", - path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def reprocess_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Reprocess a document. - - Given a library and a document in that library, reprocess that document, it will be billed again. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsReprocessV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py deleted file mode 100644 index 7430f804..00000000 --- a/src/mistralai/embeddings.py +++ /dev/null @@ -1,240 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - embeddingdtype as models_embeddingdtype, - embeddingrequest as models_embeddingrequest, - encodingformat as models_encodingformat, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, Mapping, Optional, Union - - -class Embeddings(BaseSDK): - r"""Embeddings API.""" - - def create( - self, - *, - model: str, - inputs: Union[ - models_embeddingrequest.EmbeddingRequestInputs, - models_embeddingrequest.EmbeddingRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, - encoding_format: Optional[models_encodingformat.EncodingFormat] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.EmbeddingResponse: - r"""Embeddings - - Embeddings - - :param model: The ID of the model to be used for embedding. - :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. - :param metadata: - :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. - :param output_dtype: - :param encoding_format: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.EmbeddingRequest( - model=model, - metadata=metadata, - inputs=inputs, - output_dimension=output_dimension, - output_dtype=output_dtype, - encoding_format=encoding_format, - ) - - req = self._build_request( - method="POST", - path="/v1/embeddings", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.EmbeddingRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.EmbeddingResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - model: str, - inputs: Union[ - models_embeddingrequest.EmbeddingRequestInputs, - models_embeddingrequest.EmbeddingRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, - encoding_format: Optional[models_encodingformat.EncodingFormat] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.EmbeddingResponse: - r"""Embeddings - - Embeddings - - :param model: The ID of the model to be used for embedding. - :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. - :param metadata: - :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
- :param output_dtype: - :param encoding_format: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.EmbeddingRequest( - model=model, - metadata=metadata, - inputs=inputs, - output_dimension=output_dimension, - output_dtype=output_dtype, - encoding_format=encoding_format, - ) - - req = self._build_request_async( - method="POST", - path="/v1/embeddings", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.EmbeddingRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.EmbeddingResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/files.py b/src/mistralai/files.py deleted file mode 100644 index 90ada0ff..00000000 --- a/src/mistralai/files.py +++ /dev/null @@ -1,1120 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -import httpx -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - file as models_file, - filepurpose as models_filepurpose, - sampletype as models_sampletype, - source as models_source, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import List, Mapping, Optional, Union - - -class Files(BaseSDK): - r"""Files API""" - - def upload( - self, - *, - file: Union[models_file.File, models_file.FileTypedDict], - purpose: Optional[models_filepurpose.FilePurpose] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: - r"""Upload File - - Upload a file that can be used across various endpoints. - - The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. - - Please contact us if you need to increase these storage limits. - - :param file: The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - :param purpose: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( - purpose=purpose, - file=utils.get_pydantic_model(file, models.File), - ) - - req = self._build_request( - method="POST", - path="/v1/files", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.FilesAPIRoutesUploadFileMultiPartBodyParams, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_upload_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UploadFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def upload_async( - self, - *, - file: Union[models_file.File, models_file.FileTypedDict], - purpose: Optional[models_filepurpose.FilePurpose] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: - r"""Upload File - - Upload a file that can be used across various endpoints. - - The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. - - Please contact us if you need to increase these storage limits. - - :param file: The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - :param purpose: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( - purpose=purpose, - file=utils.get_pydantic_model(file, models.File), - ) - - req = self._build_request_async( - method="POST", - path="/v1/files", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.FilesAPIRoutesUploadFileMultiPartBodyParams, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_upload_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UploadFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, - source: OptionalNullable[List[models_source.Source]] = UNSET, - search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, - mimetypes: OptionalNullable[List[str]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: - r"""List Files - - Returns a list of files that belong to the user's organization. - - :param page: - :param page_size: - :param include_total: - :param sample_type: - :param source: - :param search: - :param purpose: - :param mimetypes: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesListFilesRequest( - page=page, - page_size=page_size, - include_total=include_total, - sample_type=sample_type, - source=source, - search=search, - purpose=purpose, - mimetypes=mimetypes, - ) - - req = self._build_request( - method="GET", - path="/v1/files", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_list_files", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListFilesOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, - source: OptionalNullable[List[models_source.Source]] = UNSET, - search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, - mimetypes: OptionalNullable[List[str]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: - r"""List Files - - Returns a list of files that belong to the user's organization. - - :param page: - :param page_size: - :param include_total: - :param sample_type: - :param source: - :param search: - :param purpose: - :param mimetypes: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesListFilesRequest( - page=page, - page_size=page_size, - include_total=include_total, - sample_type=sample_type, - source=source, - search=search, - purpose=purpose, - mimetypes=mimetypes, - ) - - req = self._build_request_async( - method="GET", - path="/v1/files", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_list_files", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListFilesOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def retrieve( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: - r"""Retrieve File - - Returns information about a specific file. - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesRetrieveFileRequest( - file_id=file_id, - ) - - req = self._build_request( - method="GET", - path="/v1/files/{file_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_retrieve_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.RetrieveFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def retrieve_async( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: - r"""Retrieve File - - Returns information about a specific file. - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesRetrieveFileRequest( - file_id=file_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/files/{file_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_retrieve_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.RetrieveFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: - r"""Delete File - - Delete a file. - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesDeleteFileRequest( - file_id=file_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/files/{file_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_delete_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: - r"""Delete File - - Delete a file. - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesDeleteFileRequest( - file_id=file_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/files/{file_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_delete_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def download( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Response: - r"""Download File - - Download a file - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesDownloadFileRequest( - file_id=file_id, - ) - - req = self._build_request( - method="GET", - path="/v1/files/{file_id}/content", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/octet-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_download_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/octet-stream"): - return http_res - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def download_async( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Response: - r"""Download File - - Download a file - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesDownloadFileRequest( - file_id=file_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/files/{file_id}/content", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/octet-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_download_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/octet-stream"): - return http_res - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - def get_signed_url( - self, - *, - file_id: str, - expiry: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: - r"""Get Signed Url - - :param file_id: - :param expiry: Number of hours before the url becomes invalid. Defaults to 24h - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesGetSignedURLRequest( - file_id=file_id, - expiry=expiry, - ) - - req = self._build_request( - method="GET", - path="/v1/files/{file_id}/url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_get_signed_url", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FileSignedURL, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_signed_url_async( - self, - *, - file_id: str, - expiry: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: - r"""Get Signed Url - - :param file_id: - :param expiry: Number of hours before the url becomes invalid. Defaults to 24h - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesGetSignedURLRequest( - file_id=file_id, - expiry=expiry, - ) - - req = self._build_request_async( - method="GET", - path="/v1/files/{file_id}/url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_get_signed_url", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FileSignedURL, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py deleted file mode 100644 index 53109c70..00000000 --- a/src/mistralai/fim.py +++ /dev/null @@ -1,545 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - fimcompletionrequest as models_fimcompletionrequest, - fimcompletionstreamrequest as models_fimcompletionstreamrequest, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, Mapping, Optional, Union - - -class Fim(BaseSDK): - r"""Fill-in-the-middle API.""" - - def complete( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_fimcompletionrequest.FIMCompletionRequestStop, - models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FIMCompletionResponse: - r"""Fim Completion - - FIM completion. - - :param model: ID of the model with FIM to use. - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use; we recommend a value between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided. - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix`, the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion.
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request( - method="POST", - path="/v1/fim/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FIMCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def complete_async( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_fimcompletionrequest.FIMCompletionRequestStop, - models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - 
http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FIMCompletionResponse: - r"""Fim Completion - - FIM completion. - - :param model: ID of the model with FIM to use. - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use; we recommend a value between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided. - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix`, the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
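A minimal sketch of the fill-in-the-middle call documented above (`codestral-latest` is illustrative, any FIM-capable model ID works; the response is assumed to follow the usual chat-completion shape with `choices[0].message.content`):

```python
import os

from mistralai import Mistral

# Complete the code that belongs between `prompt` and `suffix`.
with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    res = client.fim.complete(
        model="codestral-latest",
        prompt="def fibonacci(n: int) -> int:\n",
        suffix="\nprint(fibonacci(10))",
        temperature=0.2,
        max_tokens=128,
    )
    print(res.choices[0].message.content)
```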
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fim/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FIMCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def stream( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.CompletionEvent]: - r"""Stream fim completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model with FIM to use. - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use; we recommend a value between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided. - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix`, the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
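The streaming variant returns an `EventStream` of `models.CompletionEvent` objects; a sketch of draining it, assuming each event's `data` carries chunked `choices` with a `delta`:

```python
import os

from mistralai import Mistral

# Print fill-in-the-middle tokens as the server-sent events arrive.
with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    with client.fim.stream(
        model="codestral-latest",  # illustrative FIM-capable model
        prompt="def quicksort(xs):\n",
    ) as events:
        for event in events:
            # `content` can be None on the final chunk; print what exists.
            print(event.data.choices[0].delta.content or "", end="")
```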
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request( - method="POST", - path="/v1/fim/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_fim", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def stream_async( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> 
eventstreaming.EventStreamAsync[models.CompletionEvent]: - r"""Stream fim completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model with FIM to use. - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use; we recommend a value between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided. - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix`, the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fim/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_fim", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/fine_tuning.py b/src/mistralai/fine_tuning.py deleted file mode 100644 index 8ed5788a..00000000 --- a/src/mistralai/fine_tuning.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.jobs import Jobs -from typing import Optional - - -class FineTuning(BaseSDK): - jobs: Jobs - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py deleted file mode 100644 index 89560b56..00000000 --- a/src/mistralai/httpclient.py +++ /dev/null @@ -1,125 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -# pyright: reportReturnType = false -import asyncio -from typing_extensions import Protocol, runtime_checkable -import httpx -from typing import Any, Optional, Union - - -@runtime_checkable -class HttpClient(Protocol): - def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - def close(self) -> None: - pass - - -@runtime_checkable -class AsyncHttpClient(Protocol): - async def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - async def aclose(self) -> None: - pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying
resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. - owner.client = None - owner.async_client = None - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - try: - loop = asyncio.get_running_loop() - asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) - except RuntimeError: - try: - asyncio.run(async_client.aclose()) - except RuntimeError: - # best effort - pass diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py deleted file mode 100644 index df8ae4d3..00000000 --- a/src/mistralai/jobs.py +++ /dev/null @@ -1,1067 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from datetime import datetime -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - classifiertargetin as models_classifiertargetin, - finetuneablemodeltype as models_finetuneablemodeltype, - jobin as models_jobin, - jobs_api_routes_fine_tuning_get_fine_tuning_jobsop as models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop, - trainingfile as models_trainingfile, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import List, Mapping, Optional, Union - - -class Jobs(BaseSDK): - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_before: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus - ] = UNSET, - wandb_project: OptionalNullable[str] = UNSET, - wandb_name: OptionalNullable[str] = UNSET, - suffix: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: - r"""Get Fine Tuning Jobs - - Get a list of fine-tuning jobs for your organization and user. - - :param page: The page number of the results to be returned. - :param page_size: The number of items to return per page. - :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. - :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. - :param created_before: - :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. - :param status: The current job state to filter on. When set, the other results are not displayed. - :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. - :param wandb_name: The Weights and Biases run name to filter on. When set, the other results are not displayed. - :param suffix: The model suffix to filter on. When set, the other results are not displayed.
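Stepping back to the `HttpClient`/`AsyncHttpClient` protocols deleted above: `httpx.Client` satisfies them structurally, and a caller-supplied client is deliberately left open by `close_clients` (note the `*_client_supplied` flags). A sketch, assuming the SDK constructor's `client` keyword, which these protocols exist to type:

```python
import httpx

from mistralai import Mistral

# Supply a custom httpx.Client (e.g. with tuned timeouts); because it is
# caller-owned, the SDK finalizer will not close it on garbage collection.
http_client = httpx.Client(timeout=httpx.Timeout(30.0))
client = Mistral(api_key="...", client=http_client)
```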
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( - page=page, - page_size=page_size, - model=model, - created_after=created_after, - created_before=created_before, - created_by_me=created_by_me, - status=status, - wandb_project=wandb_project, - wandb_name=wandb_name, - suffix=suffix, - ) - - req = self._build_request( - method="GET", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.JobsOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_before: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus - ] = UNSET, - wandb_project: OptionalNullable[str] = UNSET, - wandb_name: OptionalNullable[str] = UNSET, - suffix: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: - r"""Get Fine Tuning Jobs - - Get a list of fine-tuning jobs for your organization and user. - - :param page: The page number of the results to be returned. - :param page_size: The number of items to return per page. - :param model: The model name used for fine-tuning to filter on. 
When set, the other results are not displayed. - :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. - :param created_before: - :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. - :param status: The current job state to filter on. When set, the other results are not displayed. - :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. - :param wandb_name: The Weights and Biases run name to filter on. When set, the other results are not displayed. - :param suffix: The model suffix to filter on. When set, the other results are not displayed. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( - page=page, - page_size=page_size, - model=model, - created_after=created_after, - created_before=created_before, - created_by_me=created_by_me, - status=status, - wandb_project=wandb_project, - wandb_name=wandb_name, - suffix=suffix, - ) - - req = self._build_request_async( - method="GET", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.JobsOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def create( - self, - *, - model: str, - hyperparameters: Union[ - models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict - ], - training_files: Optional[ - Union[ - List[models_trainingfile.TrainingFile], -
List[models_trainingfile.TrainingFileTypedDict], - ] - ] = None, - validation_files: OptionalNullable[List[str]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[ - Union[ - List[models_jobin.JobInIntegrations], - List[models_jobin.JobInIntegrationsTypedDict], - ] - ] = UNSET, - auto_start: Optional[bool] = None, - invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[ - models_finetuneablemodeltype.FineTuneableModelType - ] = UNSET, - repositories: OptionalNullable[ - Union[ - List[models_jobin.JobInRepositories], - List[models_jobin.JobInRepositoriesTypedDict], - ] - ] = UNSET, - classifier_targets: OptionalNullable[ - Union[ - List[models_classifiertargetin.ClassifierTargetIn], - List[models_classifiertargetin.ClassifierTargetInTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: - r"""Create Fine Tuning Job - - Create a new fine-tuning job; it will be queued for processing. - - :param model: The name of the model to fine-tune. - :param hyperparameters: - :param training_files: - :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. - :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` - :param integrations: A list of integrations to enable for your fine-tuning job. - :param auto_start: This field will be required in a future release. - :param invalid_sample_skip_percentage: - :param job_type: - :param repositories: - :param classifier_targets: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
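For reference while reviewing the removal, a minimal sketch of a typical `create` call; the accessor path, file ID, and hyperparameter values are placeholders chosen for illustration, not values from this diff.

```python
import os
from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as mistral:
    # TypedDict forms are accepted wherever the signature unions a
    # Pydantic model with its TypedDict counterpart.
    created = mistral.fine_tuning.jobs.create(
        model="open-mistral-7b",
        hyperparameters={"learning_rate": 0.0001},  # illustrative value
        training_files=[{"file_id": "<uploaded-file-id>"}],  # placeholder ID
        suffix="my-great-model",
        auto_start=False,  # validate first, then start() explicitly
    )
    print(created)
```

Pairing `auto_start=False` with a later `start()` call matches the validated-then-started workflow the `start` operation below describes.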
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobIn( - model=model, - training_files=utils.get_pydantic_model( - training_files, Optional[List[models.TrainingFile]] - ), - validation_files=validation_files, - suffix=suffix, - integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegrations]] - ), - auto_start=auto_start, - invalid_sample_skip_percentage=invalid_sample_skip_percentage, - job_type=job_type, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.Hyperparameters - ), - repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepositories]] - ), - classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] - ), - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - model: str, - hyperparameters: Union[ - models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict - ], - training_files: Optional[ - Union[ - List[models_trainingfile.TrainingFile], - List[models_trainingfile.TrainingFileTypedDict], - ] - ] = None, - validation_files: OptionalNullable[List[str]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[ - Union[ - List[models_jobin.JobInIntegrations], - List[models_jobin.JobInIntegrationsTypedDict], - ] - ] = UNSET, - auto_start: Optional[bool] = None, - invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[ - models_finetuneablemodeltype.FineTuneableModelType - ] = UNSET, - repositories: OptionalNullable[ 
- Union[ - List[models_jobin.JobInRepositories], - List[models_jobin.JobInRepositoriesTypedDict], - ] - ] = UNSET, - classifier_targets: OptionalNullable[ - Union[ - List[models_classifiertargetin.ClassifierTargetIn], - List[models_classifiertargetin.ClassifierTargetInTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: - r"""Create Fine Tuning Job - - Create a new fine-tuning job; it will be queued for processing. - - :param model: The name of the model to fine-tune. - :param hyperparameters: - :param training_files: - :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. - :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` - :param integrations: A list of integrations to enable for your fine-tuning job. - :param auto_start: This field will be required in a future release. - :param invalid_sample_skip_percentage: - :param job_type: - :param repositories: - :param classifier_targets: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobIn( - model=model, - training_files=utils.get_pydantic_model( - training_files, Optional[List[models.TrainingFile]] - ), - validation_files=validation_files, - suffix=suffix, - integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegrations]] - ), - auto_start=auto_start, - invalid_sample_skip_percentage=invalid_sample_skip_percentage, - job_type=job_type, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.Hyperparameters - ), - repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepositories]] - ), - classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: - r"""Get Fine Tuning Job - - Get a fine-tuned job details by its UUID. - - :param job_id: The ID of the job to analyse. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="GET", - path="/v1/fine_tuning/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: - r"""Get Fine Tuning Job - - Get a fine-tuned job details by its UUID. - - :param job_id: The ID of the job to analyse. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/fine_tuning/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def cancel( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: - r"""Cancel Fine Tuning Job - - Request the cancellation of a fine tuning job. - - :param job_id: The ID of the job to cancel. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def cancel_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: - r"""Cancel Fine Tuning Job - - Request the cancellation of a fine tuning job. - - :param job_id: The ID of the job to cancel. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def start( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: - r"""Start Fine Tuning Job - - Request the start of a validated fine tuning job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/start", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def start_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: - r"""Start Fine Tuning Job - - Request the start of a validated fine tuning job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/start", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/libraries.py b/src/mistralai/libraries.py deleted file mode 100644 index 32648937..00000000 --- a/src/mistralai/libraries.py +++ /dev/null @@ -1,946 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.accesses import Accesses -from mistralai.documents import Documents -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional - - -class Libraries(BaseSDK): - r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" - - documents: Documents - r"""(beta) Libraries API - manage documents in a library.""" - accesses: Accesses - r"""(beta) Libraries API - manage access to a library.""" - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.documents = Documents(self.sdk_configuration, parent_ref=self.parent_ref) - self.accesses = Accesses(self.sdk_configuration, parent_ref=self.parent_ref) - - def list( - self, - *, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: - r"""List all libraries you have access to. - - List all libraries that you have created or have been shared with you. - - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
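As with `jobs.py`, a brief reference sketch of the removed Libraries surface; the `beta.libraries` accessor path is an assumption based on the beta grouping this class documents.

```python
import os
from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as mistral:
    # Returns libraries you created plus any shared with you.
    libraries = mistral.beta.libraries.list()
    print(libraries)
```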
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - req = self._build_request( - method="GET", - path="/v1/libraries", - base_url=base_url, - url_variables=url_variables, - request=None, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListLibraryOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: - r"""List all libraries you have access to. - - List all libraries that you have created or have been shared with you. - - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - req = self._build_request_async( - method="GET", - path="/v1/libraries", - base_url=base_url, - url_variables=url_variables, - request=None, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListLibraryOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def create( - self, - *, - name: str, - description: OptionalNullable[str] = UNSET, - chunk_size: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Create a new Library. - - Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. - - :param name: - :param description: - :param chunk_size: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibraryIn( - name=name, - description=description, - chunk_size=chunk_size, - ) - - req = self._build_request( - method="POST", - path="/v1/libraries", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_create_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "201", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - name: str, - description: OptionalNullable[str] = UNSET, - chunk_size: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Create a new Library. - - Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. - - :param name: - :param description: - :param chunk_size: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibraryIn( - name=name, - description=description, - chunk_size=chunk_size, - ) - - req = self._build_request_async( - method="POST", - path="/v1/libraries", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_create_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "201", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Detailed information about a specific Library. - - Given a library id, details information about that Library. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesGetV1Request( - library_id=library_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_get_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Detailed information about a specific Library. - - Given a library id, details information about that Library. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesGetV1Request( - library_id=library_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_get_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Delete a library and all of it's document. - - Given a library id, deletes it together with all documents that have been uploaded to that library. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDeleteV1Request( - library_id=library_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Delete a library and all of it's document. - - Given a library id, deletes it together with all documents that have been uploaded to that library. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDeleteV1Request( - library_id=library_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update( - self, - *, - library_id: str, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Update a library. - - Given a library id, you can update the name and description. - - :param library_id: - :param name: - :param description: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesUpdateV1Request( - library_id=library_id, - library_in_update=models.LibraryInUpdate( - name=name, - description=description, - ), - ) - - req = self._build_request( - method="PUT", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_update_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_async( - self, - *, - library_id: str, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Update a library. - - Given a library id, you can update the name and description. - - :param library_id: - :param name: - :param description: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesUpdateV1Request( - library_id=library_id, - library_in_update=models.LibraryInUpdate( - name=name, - description=description, - ), - ) - - req = self._build_request_async( - method="PUT", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_update_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py deleted file mode 100644 index 7fb0ce25..00000000 --- a/src/mistralai/mistral_agents.py +++ /dev/null @@ -1,2080 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - agentcreationrequest as models_agentcreationrequest, - agents_api_v1_agents_getop as models_agents_api_v1_agents_getop, - agentupdaterequest as models_agentupdaterequest, - completionargs as models_completionargs, - requestsource as models_requestsource, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - - -class MistralAgents(BaseSDK): - r"""(beta) Agents API""" - - def create( - self, - *, - model: str, - name: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_agentcreationrequest.AgentCreationRequestTools], - List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Create a agent that can be used within a conversation. - - Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. - - :param model: - :param name: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: White-listed arguments from the completion API - :param description: - :param handoffs: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentCreationRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - metadata=metadata, - ) - - req = self._build_request( - method="POST", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_create", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - model: str, - name: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_agentcreationrequest.AgentCreationRequestTools], - List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Create a agent that can be used within a conversation. - - Create a new agent giving it instructions, tools, description. 
The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. - - :param model: - :param name: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: White-listed arguments from the completion API - :param description: - :param handoffs: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentCreationRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - metadata=metadata, - ) - - req = self._build_request_async( - method="POST", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_create", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - deployment_chat: OptionalNullable[bool] = 
UNSET, - sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, - name: OptionalNullable[str] = UNSET, - id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List agent entities. - - Retrieve a list of agent entities sorted by creation time. - - :param page: Page number (0-indexed) - :param page_size: Number of agents per page - :param deployment_chat: - :param sources: - :param name: - :param id: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListRequest( - page=page, - page_size=page_size, - deployment_chat=deployment_chat, - sources=sources, - name=name, - id=id, - metadata=metadata, - ) - - req = self._build_request( - method="GET", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.Agent], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - deployment_chat: OptionalNullable[bool] = UNSET, - sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, - name: OptionalNullable[str] = 
UNSET, - id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List agent entities. - - Retrieve a list of agent entities sorted by creation time. - - :param page: Page number (0-indexed) - :param page_size: Number of agents per page - :param deployment_chat: - :param sources: - :param name: - :param id: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListRequest( - page=page, - page_size=page_size, - deployment_chat=deployment_chat, - sources=sources, - name=name, - id=id, - metadata=metadata, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.Agent], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - agent_id: str, - agent_version: OptionalNullable[ - Union[ - models_agents_api_v1_agents_getop.QueryParamAgentVersion, - models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: 
Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve an agent entity. - - Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. - - :param agent_id: - :param agent_version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetRequest( - agent_id=agent_id, - agent_version=agent_version, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - agent_id: str, - agent_version: OptionalNullable[ - Union[ - models_agents_api_v1_agents_getop.QueryParamAgentVersion, - models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve an agent entity. - - Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. 
- - :param agent_id: - :param agent_version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetRequest( - agent_id=agent_id, - agent_version=agent_version, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update( - self, - *, - agent_id: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_agentupdaterequest.AgentUpdateRequestTools], - List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - model: OptionalNullable[str] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - deployment_chat: OptionalNullable[bool] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent entity. 
- - Update an agent's attributes and create a new version. - - :param agent_id: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: White-listed arguments from the completion API - :param model: - :param name: - :param description: - :param handoffs: - :param deployment_chat: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateRequest( - agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - deployment_chat=deployment_chat, - metadata=metadata, - ), - ) - - req = self._build_request( - method="PATCH", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, - False, - False, - "json", - models.AgentUpdateRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_async( - self, - *, -
agent_id: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_agentupdaterequest.AgentUpdateRequestTools], - List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - model: OptionalNullable[str] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - deployment_chat: OptionalNullable[bool] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent entity. - - Update an agent's attributes and create a new version. - - :param agent_id: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: White-listed arguments from the completion API - :param model: - :param name: - :param description: - :param handoffs: - :param deployment_chat: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateRequest( - agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - deployment_chat=deployment_chat, - metadata=metadata, - ), - ) - - req = self._build_request_async( - method="PATCH", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, - False, - False, - "json", - models.AgentUpdateRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), -
request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete an agent entity. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsDeleteRequest( - agent_id=agent_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise 
models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete an agent entity. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsDeleteRequest( - agent_id=agent_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update_version( - self, - *, - agent_id: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent version. - - Switch the version of an agent. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request( - method="PATCH", - path="/v1/agents/{agent_id}/version", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_version_async( - self, - *, - agent_id: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent version. - - Switch the version of an agent. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request_async( - method="PATCH", - path="/v1/agents/{agent_id}/version", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list_versions( - self, - *, - agent_id: str, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List all versions of an agent. - - Retrieve all versions for a specific agent with full agent context. Supports pagination. - - :param agent_id: - :param page: Page number (0-indexed) - :param page_size: Number of versions per page - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListVersionsRequest( - agent_id=agent_id, - page=page, - page_size=page_size, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}/versions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list_versions", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.Agent], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_versions_async( - self, - *, - agent_id: str, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List all versions of an agent. - - Retrieve all versions for a specific agent with full agent context. Supports pagination. - - :param agent_id: - :param page: Page number (0-indexed) - :param page_size: Number of versions per page - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListVersionsRequest( - agent_id=agent_id, - page=page, - page_size=page_size, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}/versions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list_versions", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.Agent], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get_version( - self, - *, - agent_id: str, - version: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve a specific version of an agent. - - Get a specific agent version by version number. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}/versions/{version}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get_version", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_version_async( - self, - *, - agent_id: str, - version: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve a specific version of an agent. - - Get a specific agent version by version number. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}/versions/{version}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get_version", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def create_version_alias( - self, - *, - agent_id: str, - alias: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentAliasResponse: - r"""Create or update an agent version alias. - - Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. - - :param agent_id: - :param alias: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( - agent_id=agent_id, - alias=alias, - version=version, - ) - - req = self._build_request( - method="PUT", - path="/v1/agents/{agent_id}/aliases", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_create_or_update_alias", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.AgentAliasResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_version_alias_async( - self, - *, - agent_id: str, - alias: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentAliasResponse: - r"""Create or update an agent version alias. - - Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. - - :param agent_id: - :param alias: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( - agent_id=agent_id, - alias=alias, - version=version, - ) - - req = self._build_request_async( - method="PUT", - path="/v1/agents/{agent_id}/aliases", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_create_or_update_alias", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.AgentAliasResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list_version_aliases( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.AgentAliasResponse]: - r"""List all aliases for an agent. - - Retrieve all version aliases for a specific agent. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListVersionAliasesRequest( - agent_id=agent_id, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}/aliases", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list_version_aliases", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.AgentAliasResponse], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_version_aliases_async( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.AgentAliasResponse]: - r"""List all aliases for an agent. - - Retrieve all version aliases for a specific agent. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListVersionAliasesRequest( - agent_id=agent_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}/aliases", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list_version_aliases", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.AgentAliasResponse], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py deleted file mode 100644 index d1aeec8a..00000000 --- a/src/mistralai/mistral_jobs.py +++ /dev/null @@ -1,799 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from datetime import datetime -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - apiendpoint as models_apiendpoint, - batchjobstatus as models_batchjobstatus, - batchrequest as models_batchrequest, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - - -class MistralJobs(BaseSDK): - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: - r"""Get Batch Jobs - - Get a list of batch jobs for your organization and user. - - :param page: - :param page_size: - :param model: - :param agent_id: - :param metadata: - :param created_after: - :param created_by_me: - :param status: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( - page=page, - page_size=page_size, - model=model, - agent_id=agent_id, - metadata=metadata, - created_after=created_after, - created_by_me=created_by_me, - status=status, - ) - - req = self._build_request( - method="GET", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobsOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: - r"""Get Batch Jobs - - Get a list of batch jobs for your organization and user. - - :param page: - :param page_size: - :param model: - :param agent_id: - :param metadata: - :param created_after: - :param created_by_me: - :param status: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( - page=page, - page_size=page_size, - model=model, - agent_id=agent_id, - metadata=metadata, - created_after=created_after, - created_by_me=created_by_me, - status=status, - ) - - req = self._build_request_async( - method="GET", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobsOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise 
models.SDKError("Unexpected response received", http_res) - - def create( - self, - *, - endpoint: models_apiendpoint.APIEndpoint, - input_files: OptionalNullable[List[str]] = UNSET, - requests: OptionalNullable[ - Union[ - List[models_batchrequest.BatchRequest], - List[models_batchrequest.BatchRequestTypedDict], - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, str]] = UNSET, - timeout_hours: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Create Batch Job - - Create a new batch job, it will be queued for processing. - - :param endpoint: - :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` - :param requests: - :param model: The model to be used for batch inference. - :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. - :param metadata: The metadata of your choice to be associated with the batch inference job. - :param timeout_hours: The timeout in hours for the batch inference job. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.BatchJobIn( - input_files=input_files, - requests=utils.get_pydantic_model( - requests, OptionalNullable[List[models.BatchRequest]] - ), - endpoint=endpoint, - model=model, - agent_id=agent_id, - metadata=metadata, - timeout_hours=timeout_hours, - ) - - req = self._build_request( - method="POST", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - endpoint: models_apiendpoint.APIEndpoint, - input_files: OptionalNullable[List[str]] = UNSET, - requests: OptionalNullable[ - Union[ - List[models_batchrequest.BatchRequest], - List[models_batchrequest.BatchRequestTypedDict], - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, str]] = UNSET, - timeout_hours: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Create Batch Job - - Create a new batch job, it will be queued for processing. - - :param endpoint: - :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` - :param requests: - :param model: The model to be used for batch inference. - :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. - :param metadata: The metadata of your choice to be associated with the batch inference job. - :param timeout_hours: The timeout in hours for the batch inference job. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.BatchJobIn( - input_files=input_files, - requests=utils.get_pydantic_model( - requests, OptionalNullable[List[models.BatchRequest]] - ), - endpoint=endpoint, - model=model, - agent_id=agent_id, - metadata=metadata, - timeout_hours=timeout_hours, - ) - - req = self._build_request_async( - method="POST", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - job_id: str, - inline: OptionalNullable[bool] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, 
- http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Get Batch Job - - Get a batch job's details by its UUID. - - Args: - inline: If True, return results inline in the response. - - :param job_id: - :param inline: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobRequest( - job_id=job_id, - inline=inline, - ) - - req = self._build_request( - method="GET", - path="/v1/batch/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - job_id: str, - inline: OptionalNullable[bool] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Get Batch Job - - Get a batch job's details by its UUID. - - Args: - inline: If True, return results inline in the response. - - :param job_id: - :param inline: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests.
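A hypothetical sketch of `get` with `inline=True`, which per the docstring above returns results inline in the response rather than as a file reference; the UUID is a placeholder and the `client.batch.jobs` accessor is assumed.

```python
# Hypothetical sketch, not part of the patch: fetches one job by UUID.
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    job = client.batch.jobs.get(
        job_id="00000000-0000-0000-0000-000000000000",  # placeholder UUID
        inline=True,  # embed results in the response
    )
    print(job.status)
```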
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobRequest( - job_id=job_id, - inline=inline, - ) - - req = self._build_request_async( - method="GET", - path="/v1/batch/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def cancel( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Cancel Batch Job - - Request the cancellation of a batch job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/batch/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def cancel_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Cancel Batch Job - - Request the cancellation of a batch job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/batch/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py deleted file mode 100644 index 23e65222..00000000 --- a/src/mistralai/models/__init__.py +++ /dev/null @@ -1,2531 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .mistralerror import MistralError -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys - -if TYPE_CHECKING: - from .agent import ( - Agent, - AgentObject, - AgentTools, - AgentToolsTypedDict, - AgentTypedDict, - ) - from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict - from .agentconversation import ( - AgentConversation, - AgentConversationAgentVersion, - AgentConversationAgentVersionTypedDict, - AgentConversationObject, - AgentConversationTypedDict, - ) - from .agentcreationrequest import ( - AgentCreationRequest, - AgentCreationRequestTools, - AgentCreationRequestToolsTypedDict, - AgentCreationRequestTypedDict, - ) - from .agenthandoffdoneevent import ( - AgentHandoffDoneEvent, - AgentHandoffDoneEventType, - AgentHandoffDoneEventTypedDict, - ) - from .agenthandoffentry import ( - AgentHandoffEntry, - AgentHandoffEntryObject, - AgentHandoffEntryType, - AgentHandoffEntryTypedDict, - ) - from .agenthandoffstartedevent import ( - AgentHandoffStartedEvent, - AgentHandoffStartedEventType, - AgentHandoffStartedEventTypedDict, - ) - from .agents_api_v1_agents_create_or_update_aliasop import ( - AgentsAPIV1AgentsCreateOrUpdateAliasRequest, - AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, - ) - from .agents_api_v1_agents_deleteop import ( - AgentsAPIV1AgentsDeleteRequest, - AgentsAPIV1AgentsDeleteRequestTypedDict, - ) - from .agents_api_v1_agents_get_versionop import ( - AgentsAPIV1AgentsGetVersionRequest, - AgentsAPIV1AgentsGetVersionRequestTypedDict, - ) - from .agents_api_v1_agents_getop import ( - AgentsAPIV1AgentsGetRequest, - AgentsAPIV1AgentsGetRequestTypedDict, - QueryParamAgentVersion, - QueryParamAgentVersionTypedDict, - ) - from .agents_api_v1_agents_list_version_aliasesop import ( - AgentsAPIV1AgentsListVersionAliasesRequest, - AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, - ) - from .agents_api_v1_agents_list_versionsop import ( - AgentsAPIV1AgentsListVersionsRequest, - AgentsAPIV1AgentsListVersionsRequestTypedDict, - ) - from .agents_api_v1_agents_listop import ( - AgentsAPIV1AgentsListRequest, - AgentsAPIV1AgentsListRequestTypedDict, - ) - from .agents_api_v1_agents_update_versionop import ( - AgentsAPIV1AgentsUpdateVersionRequest, - AgentsAPIV1AgentsUpdateVersionRequestTypedDict, - ) - from .agents_api_v1_agents_updateop import ( - AgentsAPIV1AgentsUpdateRequest, - AgentsAPIV1AgentsUpdateRequestTypedDict, - ) - from .agents_api_v1_conversations_append_streamop import ( - AgentsAPIV1ConversationsAppendStreamRequest, - AgentsAPIV1ConversationsAppendStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_appendop import ( - AgentsAPIV1ConversationsAppendRequest, - AgentsAPIV1ConversationsAppendRequestTypedDict, - ) - from .agents_api_v1_conversations_deleteop import ( - AgentsAPIV1ConversationsDeleteRequest, - AgentsAPIV1ConversationsDeleteRequestTypedDict, - ) - from .agents_api_v1_conversations_getop import ( - AgentsAPIV1ConversationsGetRequest, - AgentsAPIV1ConversationsGetRequestTypedDict, - AgentsAPIV1ConversationsGetResponseV1ConversationsGet, - AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, - ) - from .agents_api_v1_conversations_historyop import ( - AgentsAPIV1ConversationsHistoryRequest, - AgentsAPIV1ConversationsHistoryRequestTypedDict, - ) - from .agents_api_v1_conversations_listop import ( - AgentsAPIV1ConversationsListRequest, - AgentsAPIV1ConversationsListRequestTypedDict, - ResponseBody, - ResponseBodyTypedDict, - ) - from 
.agents_api_v1_conversations_messagesop import ( - AgentsAPIV1ConversationsMessagesRequest, - AgentsAPIV1ConversationsMessagesRequestTypedDict, - ) - from .agents_api_v1_conversations_restart_streamop import ( - AgentsAPIV1ConversationsRestartStreamRequest, - AgentsAPIV1ConversationsRestartStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_restartop import ( - AgentsAPIV1ConversationsRestartRequest, - AgentsAPIV1ConversationsRestartRequestTypedDict, - ) - from .agentscompletionrequest import ( - AgentsCompletionRequest, - AgentsCompletionRequestMessages, - AgentsCompletionRequestMessagesTypedDict, - AgentsCompletionRequestStop, - AgentsCompletionRequestStopTypedDict, - AgentsCompletionRequestToolChoice, - AgentsCompletionRequestToolChoiceTypedDict, - AgentsCompletionRequestTypedDict, - ) - from .agentscompletionstreamrequest import ( - AgentsCompletionStreamRequest, - AgentsCompletionStreamRequestMessages, - AgentsCompletionStreamRequestMessagesTypedDict, - AgentsCompletionStreamRequestStop, - AgentsCompletionStreamRequestStopTypedDict, - AgentsCompletionStreamRequestToolChoice, - AgentsCompletionStreamRequestToolChoiceTypedDict, - AgentsCompletionStreamRequestTypedDict, - ) - from .agentupdaterequest import ( - AgentUpdateRequest, - AgentUpdateRequestTools, - AgentUpdateRequestToolsTypedDict, - AgentUpdateRequestTypedDict, - ) - from .apiendpoint import APIEndpoint - from .archiveftmodelout import ( - ArchiveFTModelOut, - ArchiveFTModelOutObject, - ArchiveFTModelOutTypedDict, - ) - from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, - ) - from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict - from .audioencoding import AudioEncoding - from .audioformat import AudioFormat, AudioFormatTypedDict - from .audiotranscriptionrequest import ( - AudioTranscriptionRequest, - AudioTranscriptionRequestTypedDict, - ) - from .audiotranscriptionrequeststream import ( - AudioTranscriptionRequestStream, - AudioTranscriptionRequestStreamTypedDict, - ) - from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict - from .batcherror import BatchError, BatchErrorTypedDict - from .batchjobin import BatchJobIn, BatchJobInTypedDict - from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict - from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict - from .batchjobstatus import BatchJobStatus - from .batchrequest import BatchRequest, BatchRequestTypedDict - from .builtinconnectors import BuiltInConnectors - from .chatclassificationrequest import ( - ChatClassificationRequest, - ChatClassificationRequestTypedDict, - ) - from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceTypedDict, - FinishReason, - ) - from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, - ) - from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, - ) - from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestMessages, - ChatCompletionStreamRequestMessagesTypedDict, - ChatCompletionStreamRequestStop, - ChatCompletionStreamRequestStopTypedDict, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - 
ChatCompletionStreamRequestTypedDict, - ) - from .chatmoderationrequest import ( - ChatModerationRequest, - ChatModerationRequestInputs, - ChatModerationRequestInputsTypedDict, - ChatModerationRequestTypedDict, - One, - OneTypedDict, - Two, - TwoTypedDict, - ) - from .checkpointout import CheckpointOut, CheckpointOutTypedDict - from .classificationrequest import ( - ClassificationRequest, - ClassificationRequestInputs, - ClassificationRequestInputsTypedDict, - ClassificationRequestTypedDict, - ) - from .classificationresponse import ( - ClassificationResponse, - ClassificationResponseTypedDict, - ) - from .classificationtargetresult import ( - ClassificationTargetResult, - ClassificationTargetResultTypedDict, - ) - from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutIntegrations, - ClassifierDetailedJobOutIntegrationsTypedDict, - ClassifierDetailedJobOutJobType, - ClassifierDetailedJobOutObject, - ClassifierDetailedJobOutStatus, - ClassifierDetailedJobOutTypedDict, - ) - from .classifierftmodelout import ( - ClassifierFTModelOut, - ClassifierFTModelOutModelType, - ClassifierFTModelOutObject, - ClassifierFTModelOutTypedDict, - ) - from .classifierjobout import ( - ClassifierJobOut, - ClassifierJobOutIntegrations, - ClassifierJobOutIntegrationsTypedDict, - ClassifierJobOutJobType, - ClassifierJobOutObject, - ClassifierJobOutStatus, - ClassifierJobOutTypedDict, - ) - from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict - from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict - from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, - ) - from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, - ) - from .codeinterpretertool import ( - CodeInterpreterTool, - CodeInterpreterToolType, - CodeInterpreterToolTypedDict, - ) - from .completionargs import CompletionArgs, CompletionArgsTypedDict - from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict - from .completionchunk import CompletionChunk, CompletionChunkTypedDict - from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutIntegrations, - CompletionDetailedJobOutIntegrationsTypedDict, - CompletionDetailedJobOutJobType, - CompletionDetailedJobOutObject, - CompletionDetailedJobOutRepositories, - CompletionDetailedJobOutRepositoriesTypedDict, - CompletionDetailedJobOutStatus, - CompletionDetailedJobOutTypedDict, - ) - from .completionevent import CompletionEvent, CompletionEventTypedDict - from .completionftmodelout import ( - CompletionFTModelOut, - CompletionFTModelOutObject, - CompletionFTModelOutTypedDict, - ModelType, - ) - from .completionjobout import ( - CompletionJobOut, - CompletionJobOutObject, - CompletionJobOutTypedDict, - Integrations, - IntegrationsTypedDict, - JobType, - Repositories, - RepositoriesTypedDict, - Status, - ) - from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceFinishReason, - CompletionResponseStreamChoiceTypedDict, - ) - from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, - ) - from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, - ) - from .contentchunk import ContentChunk, ContentChunkTypedDict - from .conversationappendrequest import ( - 
ConversationAppendRequest, - ConversationAppendRequestHandoffExecution, - ConversationAppendRequestTypedDict, - ) - from .conversationappendstreamrequest import ( - ConversationAppendStreamRequest, - ConversationAppendStreamRequestHandoffExecution, - ConversationAppendStreamRequestTypedDict, - ) - from .conversationevents import ( - ConversationEvents, - ConversationEventsData, - ConversationEventsDataTypedDict, - ConversationEventsTypedDict, - ) - from .conversationhistory import ( - ConversationHistory, - ConversationHistoryObject, - ConversationHistoryTypedDict, - Entries, - EntriesTypedDict, - ) - from .conversationinputs import ConversationInputs, ConversationInputsTypedDict - from .conversationmessages import ( - ConversationMessages, - ConversationMessagesObject, - ConversationMessagesTypedDict, - ) - from .conversationrequest import ( - AgentVersion, - AgentVersionTypedDict, - ConversationRequest, - ConversationRequestTypedDict, - HandoffExecution, - Tools, - ToolsTypedDict, - ) - from .conversationresponse import ( - ConversationResponse, - ConversationResponseObject, - ConversationResponseTypedDict, - Outputs, - OutputsTypedDict, - ) - from .conversationrestartrequest import ( - ConversationRestartRequest, - ConversationRestartRequestAgentVersion, - ConversationRestartRequestAgentVersionTypedDict, - ConversationRestartRequestHandoffExecution, - ConversationRestartRequestTypedDict, - ) - from .conversationrestartstreamrequest import ( - ConversationRestartStreamRequest, - ConversationRestartStreamRequestAgentVersion, - ConversationRestartStreamRequestAgentVersionTypedDict, - ConversationRestartStreamRequestHandoffExecution, - ConversationRestartStreamRequestTypedDict, - ) - from .conversationstreamrequest import ( - ConversationStreamRequest, - ConversationStreamRequestAgentVersion, - ConversationStreamRequestAgentVersionTypedDict, - ConversationStreamRequestHandoffExecution, - ConversationStreamRequestTools, - ConversationStreamRequestToolsTypedDict, - ConversationStreamRequestTypedDict, - ) - from .conversationusageinfo import ( - ConversationUsageInfo, - ConversationUsageInfoTypedDict, - ) - from .delete_model_v1_models_model_id_deleteop import ( - DeleteModelV1ModelsModelIDDeleteRequest, - DeleteModelV1ModelsModelIDDeleteRequestTypedDict, - ) - from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict - from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict - from .deltamessage import ( - Content, - ContentTypedDict, - DeltaMessage, - DeltaMessageTypedDict, - ) - from .documentlibrarytool import ( - DocumentLibraryTool, - DocumentLibraryToolType, - DocumentLibraryToolTypedDict, - ) - from .documentout import DocumentOut, DocumentOutTypedDict - from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict - from .documentupdatein import ( - Attributes, - AttributesTypedDict, - DocumentUpdateIn, - DocumentUpdateInTypedDict, - ) - from .documenturlchunk import ( - DocumentURLChunk, - DocumentURLChunkType, - DocumentURLChunkTypedDict, - ) - from .embeddingdtype import EmbeddingDtype - from .embeddingrequest import ( - EmbeddingRequest, - EmbeddingRequestInputs, - EmbeddingRequestInputsTypedDict, - EmbeddingRequestTypedDict, - ) - from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict - from .embeddingresponsedata import ( - EmbeddingResponseData, - EmbeddingResponseDataTypedDict, - ) - from .encodingformat import EncodingFormat - from .entitytype import EntityType - from .eventout import EventOut, EventOutTypedDict - 
-    from .file import File, FileTypedDict
-    from .filechunk import FileChunk, FileChunkTypedDict
-    from .filepurpose import FilePurpose
-    from .files_api_routes_delete_fileop import (
-        FilesAPIRoutesDeleteFileRequest,
-        FilesAPIRoutesDeleteFileRequestTypedDict,
-    )
-    from .files_api_routes_download_fileop import (
-        FilesAPIRoutesDownloadFileRequest,
-        FilesAPIRoutesDownloadFileRequestTypedDict,
-    )
-    from .files_api_routes_get_signed_urlop import (
-        FilesAPIRoutesGetSignedURLRequest,
-        FilesAPIRoutesGetSignedURLRequestTypedDict,
-    )
-    from .files_api_routes_list_filesop import (
-        FilesAPIRoutesListFilesRequest,
-        FilesAPIRoutesListFilesRequestTypedDict,
-    )
-    from .files_api_routes_retrieve_fileop import (
-        FilesAPIRoutesRetrieveFileRequest,
-        FilesAPIRoutesRetrieveFileRequestTypedDict,
-    )
-    from .files_api_routes_upload_fileop import (
-        FilesAPIRoutesUploadFileMultiPartBodyParams,
-        FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict,
-    )
-    from .fileschema import FileSchema, FileSchemaTypedDict
-    from .filesignedurl import FileSignedURL, FileSignedURLTypedDict
-    from .fimcompletionrequest import (
-        FIMCompletionRequest,
-        FIMCompletionRequestStop,
-        FIMCompletionRequestStopTypedDict,
-        FIMCompletionRequestTypedDict,
-    )
-    from .fimcompletionresponse import (
-        FIMCompletionResponse,
-        FIMCompletionResponseTypedDict,
-    )
-    from .fimcompletionstreamrequest import (
-        FIMCompletionStreamRequest,
-        FIMCompletionStreamRequestStop,
-        FIMCompletionStreamRequestStopTypedDict,
-        FIMCompletionStreamRequestTypedDict,
-    )
-    from .finetuneablemodeltype import FineTuneableModelType
-    from .ftclassifierlossfunction import FTClassifierLossFunction
-    from .ftmodelcapabilitiesout import (
-        FTModelCapabilitiesOut,
-        FTModelCapabilitiesOutTypedDict,
-    )
-    from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict
-    from .function import Function, FunctionTypedDict
-    from .functioncall import (
-        Arguments,
-        ArgumentsTypedDict,
-        FunctionCall,
-        FunctionCallTypedDict,
-    )
-    from .functioncallentry import (
-        FunctionCallEntry,
-        FunctionCallEntryObject,
-        FunctionCallEntryType,
-        FunctionCallEntryTypedDict,
-    )
-    from .functioncallentryarguments import (
-        FunctionCallEntryArguments,
-        FunctionCallEntryArgumentsTypedDict,
-    )
-    from .functioncallevent import (
-        FunctionCallEvent,
-        FunctionCallEventType,
-        FunctionCallEventTypedDict,
-    )
-    from .functionname import FunctionName, FunctionNameTypedDict
-    from .functionresultentry import (
-        FunctionResultEntry,
-        FunctionResultEntryObject,
-        FunctionResultEntryType,
-        FunctionResultEntryTypedDict,
-    )
-    from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict
-    from .githubrepositoryin import (
-        GithubRepositoryIn,
-        GithubRepositoryInType,
-        GithubRepositoryInTypedDict,
-    )
-    from .githubrepositoryout import (
-        GithubRepositoryOut,
-        GithubRepositoryOutType,
-        GithubRepositoryOutTypedDict,
-    )
-    from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
-    from .imagegenerationtool import (
-        ImageGenerationTool,
-        ImageGenerationToolType,
-        ImageGenerationToolTypedDict,
-    )
-    from .imageurl import ImageURL, ImageURLTypedDict
-    from .imageurlchunk import (
-        ImageURLChunk,
-        ImageURLChunkImageURL,
-        ImageURLChunkImageURLTypedDict,
-        ImageURLChunkType,
-        ImageURLChunkTypedDict,
-    )
-    from .inputentries import InputEntries, InputEntriesTypedDict
-    from .inputs import (
-        Inputs,
-        InputsTypedDict,
-        InstructRequestInputs,
-        InstructRequestInputsMessages,
-        InstructRequestInputsMessagesTypedDict,
-        InstructRequestInputsTypedDict,
-    )
-    from .instructrequest import (
-        InstructRequest,
-        InstructRequestMessages,
-        InstructRequestMessagesTypedDict,
-        InstructRequestTypedDict,
-    )
-    from .jobin import (
-        Hyperparameters,
-        HyperparametersTypedDict,
-        JobIn,
-        JobInIntegrations,
-        JobInIntegrationsTypedDict,
-        JobInRepositories,
-        JobInRepositoriesTypedDict,
-        JobInTypedDict,
-    )
-    from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict
-    from .jobs_api_routes_batch_cancel_batch_jobop import (
-        JobsAPIRoutesBatchCancelBatchJobRequest,
-        JobsAPIRoutesBatchCancelBatchJobRequestTypedDict,
-    )
-    from .jobs_api_routes_batch_get_batch_jobop import (
-        JobsAPIRoutesBatchGetBatchJobRequest,
-        JobsAPIRoutesBatchGetBatchJobRequestTypedDict,
-    )
-    from .jobs_api_routes_batch_get_batch_jobsop import (
-        JobsAPIRoutesBatchGetBatchJobsRequest,
-        JobsAPIRoutesBatchGetBatchJobsRequestTypedDict,
-    )
-    from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import (
-        JobsAPIRoutesFineTuningArchiveFineTunedModelRequest,
-        JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict,
-    )
-    from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import (
-        JobsAPIRoutesFineTuningCancelFineTuningJobRequest,
-        JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict,
-        JobsAPIRoutesFineTuningCancelFineTuningJobResponse,
-        JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict,
-    )
-    from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import (
-        JobsAPIRoutesFineTuningCreateFineTuningJobResponse,
-        JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict,
-        Response1,
-        Response1TypedDict,
-    )
-    from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import (
-        JobsAPIRoutesFineTuningGetFineTuningJobRequest,
-        JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict,
-        JobsAPIRoutesFineTuningGetFineTuningJobResponse,
-        JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict,
-    )
-    from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import (
-        JobsAPIRoutesFineTuningGetFineTuningJobsRequest,
-        JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict,
-        QueryParamStatus,
-    )
-    from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import (
-        JobsAPIRoutesFineTuningStartFineTuningJobRequest,
-        JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict,
-        JobsAPIRoutesFineTuningStartFineTuningJobResponse,
-        JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict,
-    )
-    from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import (
-        JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest,
-        JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict,
-    )
-    from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import (
-        JobsAPIRoutesFineTuningUpdateFineTunedModelRequest,
-        JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict,
-        JobsAPIRoutesFineTuningUpdateFineTunedModelResponse,
-        JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict,
-    )
-    from .jobsout import (
-        JobsOut,
-        JobsOutData,
-        JobsOutDataTypedDict,
-        JobsOutObject,
-        JobsOutTypedDict,
-    )
-    from .jsonschema import JSONSchema, JSONSchemaTypedDict
-    from .legacyjobmetadataout import (
-        LegacyJobMetadataOut,
-        LegacyJobMetadataOutObject,
-        LegacyJobMetadataOutTypedDict,
-    )
-    from .libraries_delete_v1op import (
-        LibrariesDeleteV1Request,
-        LibrariesDeleteV1RequestTypedDict,
-    )
-    from .libraries_documents_delete_v1op import (
-        LibrariesDocumentsDeleteV1Request,
-        LibrariesDocumentsDeleteV1RequestTypedDict,
-    )
-    from .libraries_documents_get_extracted_text_signed_url_v1op import (
-        LibrariesDocumentsGetExtractedTextSignedURLV1Request,
-        LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict,
-    )
-    from .libraries_documents_get_signed_url_v1op import (
-        LibrariesDocumentsGetSignedURLV1Request,
-        LibrariesDocumentsGetSignedURLV1RequestTypedDict,
-    )
-    from .libraries_documents_get_status_v1op import (
-        LibrariesDocumentsGetStatusV1Request,
-        LibrariesDocumentsGetStatusV1RequestTypedDict,
-    )
-    from .libraries_documents_get_text_content_v1op import (
-        LibrariesDocumentsGetTextContentV1Request,
-        LibrariesDocumentsGetTextContentV1RequestTypedDict,
-    )
-    from .libraries_documents_get_v1op import (
-        LibrariesDocumentsGetV1Request,
-        LibrariesDocumentsGetV1RequestTypedDict,
-    )
-    from .libraries_documents_list_v1op import (
-        LibrariesDocumentsListV1Request,
-        LibrariesDocumentsListV1RequestTypedDict,
-    )
-    from .libraries_documents_reprocess_v1op import (
-        LibrariesDocumentsReprocessV1Request,
-        LibrariesDocumentsReprocessV1RequestTypedDict,
-    )
-    from .libraries_documents_update_v1op import (
-        LibrariesDocumentsUpdateV1Request,
-        LibrariesDocumentsUpdateV1RequestTypedDict,
-    )
-    from .libraries_documents_upload_v1op import (
-        LibrariesDocumentsUploadV1DocumentUpload,
-        LibrariesDocumentsUploadV1DocumentUploadTypedDict,
-        LibrariesDocumentsUploadV1Request,
-        LibrariesDocumentsUploadV1RequestTypedDict,
-    )
-    from .libraries_get_v1op import (
-        LibrariesGetV1Request,
-        LibrariesGetV1RequestTypedDict,
-    )
-    from .libraries_share_create_v1op import (
-        LibrariesShareCreateV1Request,
-        LibrariesShareCreateV1RequestTypedDict,
-    )
-    from .libraries_share_delete_v1op import (
-        LibrariesShareDeleteV1Request,
-        LibrariesShareDeleteV1RequestTypedDict,
-    )
-    from .libraries_share_list_v1op import (
-        LibrariesShareListV1Request,
-        LibrariesShareListV1RequestTypedDict,
-    )
-    from .libraries_update_v1op import (
-        LibrariesUpdateV1Request,
-        LibrariesUpdateV1RequestTypedDict,
-    )
-    from .libraryin import LibraryIn, LibraryInTypedDict
-    from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict
-    from .libraryout import LibraryOut, LibraryOutTypedDict
-    from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict
-    from .listfilesout import ListFilesOut, ListFilesOutTypedDict
-    from .listlibraryout import ListLibraryOut, ListLibraryOutTypedDict
-    from .listsharingout import ListSharingOut, ListSharingOutTypedDict
-    from .messageentries import MessageEntries, MessageEntriesTypedDict
-    from .messageinputcontentchunks import (
-        MessageInputContentChunks,
-        MessageInputContentChunksTypedDict,
-    )
-    from .messageinputentry import (
-        MessageInputEntry,
-        MessageInputEntryContent,
-        MessageInputEntryContentTypedDict,
-        MessageInputEntryRole,
-        MessageInputEntryType,
-        MessageInputEntryTypedDict,
-        Object,
-    )
-    from .messageoutputcontentchunks import (
-        MessageOutputContentChunks,
-        MessageOutputContentChunksTypedDict,
-    )
-    from .messageoutputentry import (
-        MessageOutputEntry,
-        MessageOutputEntryContent,
-        MessageOutputEntryContentTypedDict,
-        MessageOutputEntryObject,
-        MessageOutputEntryRole,
-        MessageOutputEntryType,
-        MessageOutputEntryTypedDict,
-    )
-    from .messageoutputevent import (
-        MessageOutputEvent,
-        MessageOutputEventContent,
-        MessageOutputEventContentTypedDict,
-        MessageOutputEventRole,
-        MessageOutputEventType,
-        MessageOutputEventTypedDict,
-    )
-    from .metricout import MetricOut, MetricOutTypedDict
-    from .mistralpromptmode import MistralPromptMode
-    from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict
-    from .modelconversation import (
-        ModelConversation,
-        ModelConversationObject,
-        ModelConversationTools,
-        ModelConversationToolsTypedDict,
-        ModelConversationTypedDict,
-    )
-    from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict
-    from .moderationobject import ModerationObject, ModerationObjectTypedDict
-    from .moderationresponse import ModerationResponse, ModerationResponseTypedDict
-    from .no_response_error import NoResponseError
-    from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict
-    from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict
-    from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict
-    from .ocrrequest import (
-        Document,
-        DocumentTypedDict,
-        OCRRequest,
-        OCRRequestTypedDict,
-        TableFormat,
-    )
-    from .ocrresponse import OCRResponse, OCRResponseTypedDict
-    from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict
-    from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict
-    from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict
-    from .paginationinfo import PaginationInfo, PaginationInfoTypedDict
-    from .prediction import Prediction, PredictionTypedDict
-    from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict
-    from .realtimetranscriptionerror import (
-        RealtimeTranscriptionError,
-        RealtimeTranscriptionErrorTypedDict,
-    )
-    from .realtimetranscriptionerrordetail import (
-        Message,
-        MessageTypedDict,
-        RealtimeTranscriptionErrorDetail,
-        RealtimeTranscriptionErrorDetailTypedDict,
-    )
-    from .realtimetranscriptionsession import (
-        RealtimeTranscriptionSession,
-        RealtimeTranscriptionSessionTypedDict,
-    )
-    from .realtimetranscriptionsessioncreated import (
-        RealtimeTranscriptionSessionCreated,
-        RealtimeTranscriptionSessionCreatedTypedDict,
-    )
-    from .realtimetranscriptionsessionupdated import (
-        RealtimeTranscriptionSessionUpdated,
-        RealtimeTranscriptionSessionUpdatedTypedDict,
-    )
-    from .referencechunk import (
-        ReferenceChunk,
-        ReferenceChunkType,
-        ReferenceChunkTypedDict,
-    )
-    from .requestsource import RequestSource
-    from .responsedoneevent import (
-        ResponseDoneEvent,
-        ResponseDoneEventType,
-        ResponseDoneEventTypedDict,
-    )
-    from .responseerrorevent import (
-        ResponseErrorEvent,
-        ResponseErrorEventType,
-        ResponseErrorEventTypedDict,
-    )
-    from .responseformat import ResponseFormat, ResponseFormatTypedDict
-    from .responseformats import ResponseFormats
-    from .responsestartedevent import (
-        ResponseStartedEvent,
-        ResponseStartedEventType,
-        ResponseStartedEventTypedDict,
-    )
-    from .responsevalidationerror import ResponseValidationError
-    from .retrieve_model_v1_models_model_id_getop import (
-        RetrieveModelV1ModelsModelIDGetRequest,
-        RetrieveModelV1ModelsModelIDGetRequestTypedDict,
-        RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet,
-        RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict,
-    )
-    from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict
-    from .sampletype import SampleType
-    from .sdkerror import SDKError
-    from .security import Security, SecurityTypedDict
-    from .shareenum import ShareEnum
-    from .sharingdelete import SharingDelete, SharingDeleteTypedDict
-    from .sharingin import SharingIn, SharingInTypedDict
-    from .sharingout import SharingOut, SharingOutTypedDict
-    from .source import Source
-    from .ssetypes import SSETypes
-    from .systemmessage import (
-        Role,
-        SystemMessage,
-        SystemMessageContent,
-        SystemMessageContentTypedDict,
-        SystemMessageTypedDict,
-    )
-    from .systemmessagecontentchunks import (
-        SystemMessageContentChunks,
-        SystemMessageContentChunksTypedDict,
-    )
-    from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict
-    from .thinkchunk import (
-        ThinkChunk,
-        ThinkChunkType,
-        ThinkChunkTypedDict,
-        Thinking,
-        ThinkingTypedDict,
-    )
-    from .timestampgranularity import TimestampGranularity
-    from .tool import Tool, ToolTypedDict
-    from .toolcall import ToolCall, ToolCallTypedDict
-    from .toolchoice import ToolChoice, ToolChoiceTypedDict
-    from .toolchoiceenum import ToolChoiceEnum
-    from .toolexecutiondeltaevent import (
-        ToolExecutionDeltaEvent,
-        ToolExecutionDeltaEventName,
-        ToolExecutionDeltaEventNameTypedDict,
-        ToolExecutionDeltaEventType,
-        ToolExecutionDeltaEventTypedDict,
-    )
-    from .toolexecutiondoneevent import (
-        ToolExecutionDoneEvent,
-        ToolExecutionDoneEventName,
-        ToolExecutionDoneEventNameTypedDict,
-        ToolExecutionDoneEventType,
-        ToolExecutionDoneEventTypedDict,
-    )
-    from .toolexecutionentry import (
-        Name,
-        NameTypedDict,
-        ToolExecutionEntry,
-        ToolExecutionEntryObject,
-        ToolExecutionEntryType,
-        ToolExecutionEntryTypedDict,
-    )
-    from .toolexecutionstartedevent import (
-        ToolExecutionStartedEvent,
-        ToolExecutionStartedEventName,
-        ToolExecutionStartedEventNameTypedDict,
-        ToolExecutionStartedEventType,
-        ToolExecutionStartedEventTypedDict,
-    )
-    from .toolfilechunk import (
-        ToolFileChunk,
-        ToolFileChunkTool,
-        ToolFileChunkToolTypedDict,
-        ToolFileChunkType,
-        ToolFileChunkTypedDict,
-    )
-    from .toolmessage import (
-        ToolMessage,
-        ToolMessageContent,
-        ToolMessageContentTypedDict,
-        ToolMessageRole,
-        ToolMessageTypedDict,
-    )
-    from .toolreferencechunk import (
-        ToolReferenceChunk,
-        ToolReferenceChunkTool,
-        ToolReferenceChunkToolTypedDict,
-        ToolReferenceChunkType,
-        ToolReferenceChunkTypedDict,
-    )
-    from .tooltypes import ToolTypes
-    from .trainingfile import TrainingFile, TrainingFileTypedDict
-    from .transcriptionresponse import (
-        TranscriptionResponse,
-        TranscriptionResponseTypedDict,
-    )
-    from .transcriptionsegmentchunk import (
-        TranscriptionSegmentChunk,
-        TranscriptionSegmentChunkTypedDict,
-        Type,
-    )
-    from .transcriptionstreamdone import (
-        TranscriptionStreamDone,
-        TranscriptionStreamDoneType,
-        TranscriptionStreamDoneTypedDict,
-    )
-    from .transcriptionstreamevents import (
-        TranscriptionStreamEvents,
-        TranscriptionStreamEventsData,
-        TranscriptionStreamEventsDataTypedDict,
-        TranscriptionStreamEventsTypedDict,
-    )
-    from .transcriptionstreameventtypes import TranscriptionStreamEventTypes
-    from .transcriptionstreamlanguage import (
-        TranscriptionStreamLanguage,
-        TranscriptionStreamLanguageType,
-        TranscriptionStreamLanguageTypedDict,
-    )
-    from .transcriptionstreamsegmentdelta import (
-        TranscriptionStreamSegmentDelta,
-        TranscriptionStreamSegmentDeltaType,
-        TranscriptionStreamSegmentDeltaTypedDict,
-    )
-    from .transcriptionstreamtextdelta import (
-        TranscriptionStreamTextDelta,
-        TranscriptionStreamTextDeltaType,
-        TranscriptionStreamTextDeltaTypedDict,
-    )
-    from .unarchiveftmodelout import (
-        UnarchiveFTModelOut,
-        UnarchiveFTModelOutObject,
-        UnarchiveFTModelOutTypedDict,
-    )
-    from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict
-    from .uploadfileout import UploadFileOut, UploadFileOutTypedDict
-    from .usageinfo import UsageInfo, UsageInfoTypedDict
-    from .usermessage import (
-        UserMessage,
-        UserMessageContent,
-        UserMessageContentTypedDict,
-        UserMessageRole,
-        UserMessageTypedDict,
-    )
-    from .validationerror import (
-        Loc,
-        LocTypedDict,
-        ValidationError,
-        ValidationErrorTypedDict,
-    )
-    from .wandbintegration import (
-        WandbIntegration,
-        WandbIntegrationType,
-        WandbIntegrationTypedDict,
-    )
-    from .wandbintegrationout import (
-        WandbIntegrationOut,
-        WandbIntegrationOutType,
-        WandbIntegrationOutTypedDict,
-    )
-    from .websearchpremiumtool import (
-        WebSearchPremiumTool,
-        WebSearchPremiumToolType,
-        WebSearchPremiumToolTypedDict,
-    )
-    from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict
-
-__all__ = [
-    "APIEndpoint",
-    "Agent",
-    "AgentAliasResponse",
-    "AgentAliasResponseTypedDict",
-    "AgentConversation",
-    "AgentConversationAgentVersion",
-    "AgentConversationAgentVersionTypedDict",
-    "AgentConversationObject",
-    "AgentConversationTypedDict",
-    "AgentCreationRequest",
-    "AgentCreationRequestTools",
-    "AgentCreationRequestToolsTypedDict",
-    "AgentCreationRequestTypedDict",
-    "AgentHandoffDoneEvent",
-    "AgentHandoffDoneEventType",
-    "AgentHandoffDoneEventTypedDict",
-    "AgentHandoffEntry",
-    "AgentHandoffEntryObject",
-    "AgentHandoffEntryType",
-    "AgentHandoffEntryTypedDict",
-    "AgentHandoffStartedEvent",
-    "AgentHandoffStartedEventType",
-    "AgentHandoffStartedEventTypedDict",
-    "AgentObject",
-    "AgentTools",
-    "AgentToolsTypedDict",
-    "AgentTypedDict",
-    "AgentUpdateRequest",
-    "AgentUpdateRequestTools",
-    "AgentUpdateRequestToolsTypedDict",
-    "AgentUpdateRequestTypedDict",
-    "AgentVersion",
-    "AgentVersionTypedDict",
-    "AgentsAPIV1AgentsCreateOrUpdateAliasRequest",
-    "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict",
-    "AgentsAPIV1AgentsDeleteRequest",
-    "AgentsAPIV1AgentsDeleteRequestTypedDict",
-    "AgentsAPIV1AgentsGetRequest",
-    "AgentsAPIV1AgentsGetRequestTypedDict",
-    "AgentsAPIV1AgentsGetVersionRequest",
-    "AgentsAPIV1AgentsGetVersionRequestTypedDict",
-    "AgentsAPIV1AgentsListRequest",
-    "AgentsAPIV1AgentsListRequestTypedDict",
-    "AgentsAPIV1AgentsListVersionAliasesRequest",
-    "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict",
-    "AgentsAPIV1AgentsListVersionsRequest",
-    "AgentsAPIV1AgentsListVersionsRequestTypedDict",
-    "AgentsAPIV1AgentsUpdateRequest",
-    "AgentsAPIV1AgentsUpdateRequestTypedDict",
-    "AgentsAPIV1AgentsUpdateVersionRequest",
-    "AgentsAPIV1AgentsUpdateVersionRequestTypedDict",
-    "AgentsAPIV1ConversationsAppendRequest",
-    "AgentsAPIV1ConversationsAppendRequestTypedDict",
-    "AgentsAPIV1ConversationsAppendStreamRequest",
-    "AgentsAPIV1ConversationsAppendStreamRequestTypedDict",
-    "AgentsAPIV1ConversationsDeleteRequest",
-    "AgentsAPIV1ConversationsDeleteRequestTypedDict",
-    "AgentsAPIV1ConversationsGetRequest",
-    "AgentsAPIV1ConversationsGetRequestTypedDict",
-    "AgentsAPIV1ConversationsGetResponseV1ConversationsGet",
-    "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict",
-    "AgentsAPIV1ConversationsHistoryRequest",
-    "AgentsAPIV1ConversationsHistoryRequestTypedDict",
-    "AgentsAPIV1ConversationsListRequest",
-    "AgentsAPIV1ConversationsListRequestTypedDict",
-    "AgentsAPIV1ConversationsMessagesRequest",
-    "AgentsAPIV1ConversationsMessagesRequestTypedDict",
-    "AgentsAPIV1ConversationsRestartRequest",
-    "AgentsAPIV1ConversationsRestartRequestTypedDict",
-    "AgentsAPIV1ConversationsRestartStreamRequest",
-    "AgentsAPIV1ConversationsRestartStreamRequestTypedDict",
-    "AgentsCompletionRequest",
-    "AgentsCompletionRequestMessages",
-    "AgentsCompletionRequestMessagesTypedDict",
"AgentsCompletionRequestStop", - "AgentsCompletionRequestStopTypedDict", - "AgentsCompletionRequestToolChoice", - "AgentsCompletionRequestToolChoiceTypedDict", - "AgentsCompletionRequestTypedDict", - "AgentsCompletionStreamRequest", - "AgentsCompletionStreamRequestMessages", - "AgentsCompletionStreamRequestMessagesTypedDict", - "AgentsCompletionStreamRequestStop", - "AgentsCompletionStreamRequestStopTypedDict", - "AgentsCompletionStreamRequestToolChoice", - "AgentsCompletionStreamRequestToolChoiceTypedDict", - "AgentsCompletionStreamRequestTypedDict", - "ArchiveFTModelOut", - "ArchiveFTModelOutObject", - "ArchiveFTModelOutTypedDict", - "Arguments", - "ArgumentsTypedDict", - "AssistantMessage", - "AssistantMessageContent", - "AssistantMessageContentTypedDict", - "AssistantMessageRole", - "AssistantMessageTypedDict", - "Attributes", - "AttributesTypedDict", - "AudioChunk", - "AudioChunkType", - "AudioChunkTypedDict", - "AudioEncoding", - "AudioFormat", - "AudioFormatTypedDict", - "AudioTranscriptionRequest", - "AudioTranscriptionRequestStream", - "AudioTranscriptionRequestStreamTypedDict", - "AudioTranscriptionRequestTypedDict", - "BaseModelCard", - "BaseModelCardType", - "BaseModelCardTypedDict", - "BatchError", - "BatchErrorTypedDict", - "BatchJobIn", - "BatchJobInTypedDict", - "BatchJobOut", - "BatchJobOutObject", - "BatchJobOutTypedDict", - "BatchJobStatus", - "BatchJobsOut", - "BatchJobsOutObject", - "BatchJobsOutTypedDict", - "BatchRequest", - "BatchRequestTypedDict", - "BuiltInConnectors", - "ChatClassificationRequest", - "ChatClassificationRequestTypedDict", - "ChatCompletionChoice", - "ChatCompletionChoiceTypedDict", - "ChatCompletionRequest", - "ChatCompletionRequestToolChoice", - "ChatCompletionRequestToolChoiceTypedDict", - "ChatCompletionRequestTypedDict", - "ChatCompletionResponse", - "ChatCompletionResponseTypedDict", - "ChatCompletionStreamRequest", - "ChatCompletionStreamRequestMessages", - "ChatCompletionStreamRequestMessagesTypedDict", - "ChatCompletionStreamRequestStop", - "ChatCompletionStreamRequestStopTypedDict", - "ChatCompletionStreamRequestToolChoice", - "ChatCompletionStreamRequestToolChoiceTypedDict", - "ChatCompletionStreamRequestTypedDict", - "ChatModerationRequest", - "ChatModerationRequestInputs", - "ChatModerationRequestInputsTypedDict", - "ChatModerationRequestTypedDict", - "CheckpointOut", - "CheckpointOutTypedDict", - "ClassificationRequest", - "ClassificationRequestInputs", - "ClassificationRequestInputsTypedDict", - "ClassificationRequestTypedDict", - "ClassificationResponse", - "ClassificationResponseTypedDict", - "ClassificationTargetResult", - "ClassificationTargetResultTypedDict", - "ClassifierDetailedJobOut", - "ClassifierDetailedJobOutIntegrations", - "ClassifierDetailedJobOutIntegrationsTypedDict", - "ClassifierDetailedJobOutJobType", - "ClassifierDetailedJobOutObject", - "ClassifierDetailedJobOutStatus", - "ClassifierDetailedJobOutTypedDict", - "ClassifierFTModelOut", - "ClassifierFTModelOutModelType", - "ClassifierFTModelOutObject", - "ClassifierFTModelOutTypedDict", - "ClassifierJobOut", - "ClassifierJobOutIntegrations", - "ClassifierJobOutIntegrationsTypedDict", - "ClassifierJobOutJobType", - "ClassifierJobOutObject", - "ClassifierJobOutStatus", - "ClassifierJobOutTypedDict", - "ClassifierTargetIn", - "ClassifierTargetInTypedDict", - "ClassifierTargetOut", - "ClassifierTargetOutTypedDict", - "ClassifierTrainingParameters", - "ClassifierTrainingParametersIn", - "ClassifierTrainingParametersInTypedDict", - 
"ClassifierTrainingParametersTypedDict", - "CodeInterpreterTool", - "CodeInterpreterToolType", - "CodeInterpreterToolTypedDict", - "CompletionArgs", - "CompletionArgsStop", - "CompletionArgsStopTypedDict", - "CompletionArgsTypedDict", - "CompletionChunk", - "CompletionChunkTypedDict", - "CompletionDetailedJobOut", - "CompletionDetailedJobOutIntegrations", - "CompletionDetailedJobOutIntegrationsTypedDict", - "CompletionDetailedJobOutJobType", - "CompletionDetailedJobOutObject", - "CompletionDetailedJobOutRepositories", - "CompletionDetailedJobOutRepositoriesTypedDict", - "CompletionDetailedJobOutStatus", - "CompletionDetailedJobOutTypedDict", - "CompletionEvent", - "CompletionEventTypedDict", - "CompletionFTModelOut", - "CompletionFTModelOutObject", - "CompletionFTModelOutTypedDict", - "CompletionJobOut", - "CompletionJobOutObject", - "CompletionJobOutTypedDict", - "CompletionResponseStreamChoice", - "CompletionResponseStreamChoiceFinishReason", - "CompletionResponseStreamChoiceTypedDict", - "CompletionTrainingParameters", - "CompletionTrainingParametersIn", - "CompletionTrainingParametersInTypedDict", - "CompletionTrainingParametersTypedDict", - "Content", - "ContentChunk", - "ContentChunkTypedDict", - "ContentTypedDict", - "ConversationAppendRequest", - "ConversationAppendRequestHandoffExecution", - "ConversationAppendRequestTypedDict", - "ConversationAppendStreamRequest", - "ConversationAppendStreamRequestHandoffExecution", - "ConversationAppendStreamRequestTypedDict", - "ConversationEvents", - "ConversationEventsData", - "ConversationEventsDataTypedDict", - "ConversationEventsTypedDict", - "ConversationHistory", - "ConversationHistoryObject", - "ConversationHistoryTypedDict", - "ConversationInputs", - "ConversationInputsTypedDict", - "ConversationMessages", - "ConversationMessagesObject", - "ConversationMessagesTypedDict", - "ConversationRequest", - "ConversationRequestTypedDict", - "ConversationResponse", - "ConversationResponseObject", - "ConversationResponseTypedDict", - "ConversationRestartRequest", - "ConversationRestartRequestAgentVersion", - "ConversationRestartRequestAgentVersionTypedDict", - "ConversationRestartRequestHandoffExecution", - "ConversationRestartRequestTypedDict", - "ConversationRestartStreamRequest", - "ConversationRestartStreamRequestAgentVersion", - "ConversationRestartStreamRequestAgentVersionTypedDict", - "ConversationRestartStreamRequestHandoffExecution", - "ConversationRestartStreamRequestTypedDict", - "ConversationStreamRequest", - "ConversationStreamRequestAgentVersion", - "ConversationStreamRequestAgentVersionTypedDict", - "ConversationStreamRequestHandoffExecution", - "ConversationStreamRequestTools", - "ConversationStreamRequestToolsTypedDict", - "ConversationStreamRequestTypedDict", - "ConversationUsageInfo", - "ConversationUsageInfoTypedDict", - "Data", - "DataTypedDict", - "DeleteFileOut", - "DeleteFileOutTypedDict", - "DeleteModelOut", - "DeleteModelOutTypedDict", - "DeleteModelV1ModelsModelIDDeleteRequest", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", - "DeltaMessage", - "DeltaMessageTypedDict", - "Document", - "DocumentLibraryTool", - "DocumentLibraryToolType", - "DocumentLibraryToolTypedDict", - "DocumentOut", - "DocumentOutTypedDict", - "DocumentTextContent", - "DocumentTextContentTypedDict", - "DocumentTypedDict", - "DocumentURLChunk", - "DocumentURLChunkType", - "DocumentURLChunkTypedDict", - "DocumentUpdateIn", - "DocumentUpdateInTypedDict", - "EmbeddingDtype", - "EmbeddingRequest", - "EmbeddingRequestInputs", - 
"EmbeddingRequestInputsTypedDict", - "EmbeddingRequestTypedDict", - "EmbeddingResponse", - "EmbeddingResponseData", - "EmbeddingResponseDataTypedDict", - "EmbeddingResponseTypedDict", - "EncodingFormat", - "EntityType", - "Entries", - "EntriesTypedDict", - "EventOut", - "EventOutTypedDict", - "FIMCompletionRequest", - "FIMCompletionRequestStop", - "FIMCompletionRequestStopTypedDict", - "FIMCompletionRequestTypedDict", - "FIMCompletionResponse", - "FIMCompletionResponseTypedDict", - "FIMCompletionStreamRequest", - "FIMCompletionStreamRequestStop", - "FIMCompletionStreamRequestStopTypedDict", - "FIMCompletionStreamRequestTypedDict", - "FTClassifierLossFunction", - "FTModelCapabilitiesOut", - "FTModelCapabilitiesOutTypedDict", - "FTModelCard", - "FTModelCardType", - "FTModelCardTypedDict", - "File", - "FileChunk", - "FileChunkTypedDict", - "FilePurpose", - "FileSchema", - "FileSchemaTypedDict", - "FileSignedURL", - "FileSignedURLTypedDict", - "FileTypedDict", - "FilesAPIRoutesDeleteFileRequest", - "FilesAPIRoutesDeleteFileRequestTypedDict", - "FilesAPIRoutesDownloadFileRequest", - "FilesAPIRoutesDownloadFileRequestTypedDict", - "FilesAPIRoutesGetSignedURLRequest", - "FilesAPIRoutesGetSignedURLRequestTypedDict", - "FilesAPIRoutesListFilesRequest", - "FilesAPIRoutesListFilesRequestTypedDict", - "FilesAPIRoutesRetrieveFileRequest", - "FilesAPIRoutesRetrieveFileRequestTypedDict", - "FilesAPIRoutesUploadFileMultiPartBodyParams", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", - "FineTuneableModelType", - "FinishReason", - "Format", - "Function", - "FunctionCall", - "FunctionCallEntry", - "FunctionCallEntryArguments", - "FunctionCallEntryArgumentsTypedDict", - "FunctionCallEntryObject", - "FunctionCallEntryType", - "FunctionCallEntryTypedDict", - "FunctionCallEvent", - "FunctionCallEventType", - "FunctionCallEventTypedDict", - "FunctionCallTypedDict", - "FunctionName", - "FunctionNameTypedDict", - "FunctionResultEntry", - "FunctionResultEntryObject", - "FunctionResultEntryType", - "FunctionResultEntryTypedDict", - "FunctionTool", - "FunctionToolType", - "FunctionToolTypedDict", - "FunctionTypedDict", - "GithubRepositoryIn", - "GithubRepositoryInType", - "GithubRepositoryInTypedDict", - "GithubRepositoryOut", - "GithubRepositoryOutType", - "GithubRepositoryOutTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", - "HandoffExecution", - "Hyperparameters", - "HyperparametersTypedDict", - "ImageGenerationTool", - "ImageGenerationToolType", - "ImageGenerationToolTypedDict", - "ImageURL", - "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", - "ImageURLChunkTypedDict", - "ImageURLTypedDict", - "InputEntries", - "InputEntriesTypedDict", - "Inputs", - "InputsTypedDict", - "InstructRequest", - "InstructRequestInputs", - "InstructRequestInputsMessages", - "InstructRequestInputsMessagesTypedDict", - "InstructRequestInputsTypedDict", - "InstructRequestMessages", - "InstructRequestMessagesTypedDict", - "InstructRequestTypedDict", - "Integrations", - "IntegrationsTypedDict", - "JSONSchema", - "JSONSchemaTypedDict", - "JobIn", - "JobInIntegrations", - "JobInIntegrationsTypedDict", - "JobInRepositories", - "JobInRepositoriesTypedDict", - "JobInTypedDict", - "JobMetadataOut", - "JobMetadataOutTypedDict", - "JobType", - "JobsAPIRoutesBatchCancelBatchJobRequest", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobRequest", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", - 
"JobsAPIRoutesBatchGetBatchJobsRequest", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", - "JobsOut", - "JobsOutData", - "JobsOutDataTypedDict", - "JobsOutObject", - "JobsOutTypedDict", - "LegacyJobMetadataOut", - "LegacyJobMetadataOutObject", - "LegacyJobMetadataOutTypedDict", - "LibrariesDeleteV1Request", - "LibrariesDeleteV1RequestTypedDict", - "LibrariesDocumentsDeleteV1Request", - "LibrariesDocumentsDeleteV1RequestTypedDict", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetSignedURLV1Request", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetStatusV1Request", - "LibrariesDocumentsGetStatusV1RequestTypedDict", - "LibrariesDocumentsGetTextContentV1Request", - "LibrariesDocumentsGetTextContentV1RequestTypedDict", - "LibrariesDocumentsGetV1Request", - "LibrariesDocumentsGetV1RequestTypedDict", - "LibrariesDocumentsListV1Request", - "LibrariesDocumentsListV1RequestTypedDict", - "LibrariesDocumentsReprocessV1Request", - "LibrariesDocumentsReprocessV1RequestTypedDict", - "LibrariesDocumentsUpdateV1Request", - "LibrariesDocumentsUpdateV1RequestTypedDict", - "LibrariesDocumentsUploadV1DocumentUpload", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict", - "LibrariesDocumentsUploadV1Request", - "LibrariesDocumentsUploadV1RequestTypedDict", - "LibrariesGetV1Request", - "LibrariesGetV1RequestTypedDict", - "LibrariesShareCreateV1Request", - "LibrariesShareCreateV1RequestTypedDict", - "LibrariesShareDeleteV1Request", - "LibrariesShareDeleteV1RequestTypedDict", - "LibrariesShareListV1Request", - "LibrariesShareListV1RequestTypedDict", - "LibrariesUpdateV1Request", - "LibrariesUpdateV1RequestTypedDict", - "LibraryIn", - "LibraryInTypedDict", - "LibraryInUpdate", - "LibraryInUpdateTypedDict", - "LibraryOut", - "LibraryOutTypedDict", - "ListDocumentOut", - "ListDocumentOutTypedDict", - "ListFilesOut", - "ListFilesOutTypedDict", - "ListLibraryOut", - "ListLibraryOutTypedDict", - "ListSharingOut", - "ListSharingOutTypedDict", - "Loc", - "LocTypedDict", - "Message", 
- "MessageEntries", - "MessageEntriesTypedDict", - "MessageInputContentChunks", - "MessageInputContentChunksTypedDict", - "MessageInputEntry", - "MessageInputEntryContent", - "MessageInputEntryContentTypedDict", - "MessageInputEntryRole", - "MessageInputEntryType", - "MessageInputEntryTypedDict", - "MessageOutputContentChunks", - "MessageOutputContentChunksTypedDict", - "MessageOutputEntry", - "MessageOutputEntryContent", - "MessageOutputEntryContentTypedDict", - "MessageOutputEntryObject", - "MessageOutputEntryRole", - "MessageOutputEntryType", - "MessageOutputEntryTypedDict", - "MessageOutputEvent", - "MessageOutputEventContent", - "MessageOutputEventContentTypedDict", - "MessageOutputEventRole", - "MessageOutputEventType", - "MessageOutputEventTypedDict", - "MessageTypedDict", - "Messages", - "MessagesTypedDict", - "MetricOut", - "MetricOutTypedDict", - "MistralError", - "MistralPromptMode", - "ModelCapabilities", - "ModelCapabilitiesTypedDict", - "ModelConversation", - "ModelConversationObject", - "ModelConversationTools", - "ModelConversationToolsTypedDict", - "ModelConversationTypedDict", - "ModelList", - "ModelListTypedDict", - "ModelType", - "ModerationObject", - "ModerationObjectTypedDict", - "ModerationResponse", - "ModerationResponseTypedDict", - "Name", - "NameTypedDict", - "NoResponseError", - "OCRImageObject", - "OCRImageObjectTypedDict", - "OCRPageDimensions", - "OCRPageDimensionsTypedDict", - "OCRPageObject", - "OCRPageObjectTypedDict", - "OCRRequest", - "OCRRequestTypedDict", - "OCRResponse", - "OCRResponseTypedDict", - "OCRTableObject", - "OCRTableObjectTypedDict", - "OCRUsageInfo", - "OCRUsageInfoTypedDict", - "Object", - "One", - "OneTypedDict", - "OutputContentChunks", - "OutputContentChunksTypedDict", - "Outputs", - "OutputsTypedDict", - "PaginationInfo", - "PaginationInfoTypedDict", - "Prediction", - "PredictionTypedDict", - "ProcessingStatusOut", - "ProcessingStatusOutTypedDict", - "QueryParamAgentVersion", - "QueryParamAgentVersionTypedDict", - "QueryParamStatus", - "RealtimeTranscriptionError", - "RealtimeTranscriptionErrorDetail", - "RealtimeTranscriptionErrorDetailTypedDict", - "RealtimeTranscriptionErrorTypedDict", - "RealtimeTranscriptionSession", - "RealtimeTranscriptionSessionCreated", - "RealtimeTranscriptionSessionCreatedTypedDict", - "RealtimeTranscriptionSessionTypedDict", - "RealtimeTranscriptionSessionUpdated", - "RealtimeTranscriptionSessionUpdatedTypedDict", - "ReferenceChunk", - "ReferenceChunkType", - "ReferenceChunkTypedDict", - "Repositories", - "RepositoriesTypedDict", - "RequestSource", - "Response1", - "Response1TypedDict", - "ResponseBody", - "ResponseBodyTypedDict", - "ResponseDoneEvent", - "ResponseDoneEventType", - "ResponseDoneEventTypedDict", - "ResponseErrorEvent", - "ResponseErrorEventType", - "ResponseErrorEventTypedDict", - "ResponseFormat", - "ResponseFormatTypedDict", - "ResponseFormats", - "ResponseStartedEvent", - "ResponseStartedEventType", - "ResponseStartedEventTypedDict", - "ResponseValidationError", - "RetrieveFileOut", - "RetrieveFileOutTypedDict", - "RetrieveModelV1ModelsModelIDGetRequest", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", - "Role", - "SDKError", - "SSETypes", - "SampleType", - "Security", - "SecurityTypedDict", - "ShareEnum", - "SharingDelete", - "SharingDeleteTypedDict", - "SharingIn", - "SharingInTypedDict", - "SharingOut", - 
"SharingOutTypedDict", - "Source", - "Status", - "Stop", - "StopTypedDict", - "SystemMessage", - "SystemMessageContent", - "SystemMessageContentChunks", - "SystemMessageContentChunksTypedDict", - "SystemMessageContentTypedDict", - "SystemMessageTypedDict", - "TableFormat", - "TextChunk", - "TextChunkType", - "TextChunkTypedDict", - "ThinkChunk", - "ThinkChunkType", - "ThinkChunkTypedDict", - "Thinking", - "ThinkingTypedDict", - "TimestampGranularity", - "Tool", - "ToolCall", - "ToolCallTypedDict", - "ToolChoice", - "ToolChoiceEnum", - "ToolChoiceTypedDict", - "ToolExecutionDeltaEvent", - "ToolExecutionDeltaEventName", - "ToolExecutionDeltaEventNameTypedDict", - "ToolExecutionDeltaEventType", - "ToolExecutionDeltaEventTypedDict", - "ToolExecutionDoneEvent", - "ToolExecutionDoneEventName", - "ToolExecutionDoneEventNameTypedDict", - "ToolExecutionDoneEventType", - "ToolExecutionDoneEventTypedDict", - "ToolExecutionEntry", - "ToolExecutionEntryObject", - "ToolExecutionEntryType", - "ToolExecutionEntryTypedDict", - "ToolExecutionStartedEvent", - "ToolExecutionStartedEventName", - "ToolExecutionStartedEventNameTypedDict", - "ToolExecutionStartedEventType", - "ToolExecutionStartedEventTypedDict", - "ToolFileChunk", - "ToolFileChunkTool", - "ToolFileChunkToolTypedDict", - "ToolFileChunkType", - "ToolFileChunkTypedDict", - "ToolMessage", - "ToolMessageContent", - "ToolMessageContentTypedDict", - "ToolMessageRole", - "ToolMessageTypedDict", - "ToolReferenceChunk", - "ToolReferenceChunkTool", - "ToolReferenceChunkToolTypedDict", - "ToolReferenceChunkType", - "ToolReferenceChunkTypedDict", - "ToolTypedDict", - "ToolTypes", - "Tools", - "ToolsTypedDict", - "TrainingFile", - "TrainingFileTypedDict", - "TranscriptionResponse", - "TranscriptionResponseTypedDict", - "TranscriptionSegmentChunk", - "TranscriptionSegmentChunkTypedDict", - "TranscriptionStreamDone", - "TranscriptionStreamDoneType", - "TranscriptionStreamDoneTypedDict", - "TranscriptionStreamEventTypes", - "TranscriptionStreamEvents", - "TranscriptionStreamEventsData", - "TranscriptionStreamEventsDataTypedDict", - "TranscriptionStreamEventsTypedDict", - "TranscriptionStreamLanguage", - "TranscriptionStreamLanguageType", - "TranscriptionStreamLanguageTypedDict", - "TranscriptionStreamSegmentDelta", - "TranscriptionStreamSegmentDeltaType", - "TranscriptionStreamSegmentDeltaTypedDict", - "TranscriptionStreamTextDelta", - "TranscriptionStreamTextDeltaType", - "TranscriptionStreamTextDeltaTypedDict", - "Two", - "TwoTypedDict", - "Type", - "UnarchiveFTModelOut", - "UnarchiveFTModelOutObject", - "UnarchiveFTModelOutTypedDict", - "UpdateFTModelIn", - "UpdateFTModelInTypedDict", - "UploadFileOut", - "UploadFileOutTypedDict", - "UsageInfo", - "UsageInfoTypedDict", - "UserMessage", - "UserMessageContent", - "UserMessageContentTypedDict", - "UserMessageRole", - "UserMessageTypedDict", - "ValidationError", - "ValidationErrorTypedDict", - "WandbIntegration", - "WandbIntegrationOut", - "WandbIntegrationOutType", - "WandbIntegrationOutTypedDict", - "WandbIntegrationType", - "WandbIntegrationTypedDict", - "WebSearchPremiumTool", - "WebSearchPremiumToolType", - "WebSearchPremiumToolTypedDict", - "WebSearchTool", - "WebSearchToolType", - "WebSearchToolTypedDict", -] - -_dynamic_imports: dict[str, str] = { - "Agent": ".agent", - "AgentObject": ".agent", - "AgentTools": ".agent", - "AgentToolsTypedDict": ".agent", - "AgentTypedDict": ".agent", - "AgentAliasResponse": ".agentaliasresponse", - "AgentAliasResponseTypedDict": ".agentaliasresponse", - 
"AgentConversation": ".agentconversation", - "AgentConversationAgentVersion": ".agentconversation", - "AgentConversationAgentVersionTypedDict": ".agentconversation", - "AgentConversationObject": ".agentconversation", - "AgentConversationTypedDict": ".agentconversation", - "AgentCreationRequest": ".agentcreationrequest", - "AgentCreationRequestTools": ".agentcreationrequest", - "AgentCreationRequestToolsTypedDict": ".agentcreationrequest", - "AgentCreationRequestTypedDict": ".agentcreationrequest", - "AgentHandoffDoneEvent": ".agenthandoffdoneevent", - "AgentHandoffDoneEventType": ".agenthandoffdoneevent", - "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", - "AgentHandoffEntry": ".agenthandoffentry", - "AgentHandoffEntryObject": ".agenthandoffentry", - "AgentHandoffEntryType": ".agenthandoffentry", - "AgentHandoffEntryTypedDict": ".agenthandoffentry", - "AgentHandoffStartedEvent": ".agenthandoffstartedevent", - "AgentHandoffStartedEventType": ".agenthandoffstartedevent", - "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", - "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", - "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", - "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", - "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", - "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", - "QueryParamAgentVersion": ".agents_api_v1_agents_getop", - "QueryParamAgentVersionTypedDict": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", - "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", - "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", - "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", - "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", - "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", - "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", - "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", - "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", - 
"AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", - "ResponseBody": ".agents_api_v1_conversations_listop", - "ResponseBodyTypedDict": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", - "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", - "AgentsCompletionRequest": ".agentscompletionrequest", - "AgentsCompletionRequestMessages": ".agentscompletionrequest", - "AgentsCompletionRequestMessagesTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestStop": ".agentscompletionrequest", - "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", - "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", - "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessages": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessagesTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", - "AgentUpdateRequest": ".agentupdaterequest", - "AgentUpdateRequestTools": ".agentupdaterequest", - "AgentUpdateRequestToolsTypedDict": ".agentupdaterequest", - "AgentUpdateRequestTypedDict": ".agentupdaterequest", - "APIEndpoint": ".apiendpoint", - "ArchiveFTModelOut": ".archiveftmodelout", - "ArchiveFTModelOutObject": ".archiveftmodelout", - "ArchiveFTModelOutTypedDict": ".archiveftmodelout", - "AssistantMessage": ".assistantmessage", - "AssistantMessageContent": ".assistantmessage", - "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", - "AssistantMessageTypedDict": ".assistantmessage", - "AudioChunk": ".audiochunk", - "AudioChunkType": ".audiochunk", - "AudioChunkTypedDict": ".audiochunk", - "AudioEncoding": ".audioencoding", - "AudioFormat": ".audioformat", - "AudioFormatTypedDict": ".audioformat", - "AudioTranscriptionRequest": ".audiotranscriptionrequest", - "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", - "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", - "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", - "BaseModelCard": ".basemodelcard", - "BaseModelCardType": ".basemodelcard", - 
"BaseModelCardTypedDict": ".basemodelcard", - "BatchError": ".batcherror", - "BatchErrorTypedDict": ".batcherror", - "BatchJobIn": ".batchjobin", - "BatchJobInTypedDict": ".batchjobin", - "BatchJobOut": ".batchjobout", - "BatchJobOutObject": ".batchjobout", - "BatchJobOutTypedDict": ".batchjobout", - "BatchJobsOut": ".batchjobsout", - "BatchJobsOutObject": ".batchjobsout", - "BatchJobsOutTypedDict": ".batchjobsout", - "BatchJobStatus": ".batchjobstatus", - "BatchRequest": ".batchrequest", - "BatchRequestTypedDict": ".batchrequest", - "BuiltInConnectors": ".builtinconnectors", - "ChatClassificationRequest": ".chatclassificationrequest", - "ChatClassificationRequestTypedDict": ".chatclassificationrequest", - "ChatCompletionChoice": ".chatcompletionchoice", - "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", - "FinishReason": ".chatcompletionchoice", - "ChatCompletionRequest": ".chatcompletionrequest", - "ChatCompletionRequestToolChoice": ".chatcompletionrequest", - "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", - "ChatCompletionRequestTypedDict": ".chatcompletionrequest", - "Messages": ".chatcompletionrequest", - "MessagesTypedDict": ".chatcompletionrequest", - "Stop": ".chatcompletionrequest", - "StopTypedDict": ".chatcompletionrequest", - "ChatCompletionResponse": ".chatcompletionresponse", - "ChatCompletionResponseTypedDict": ".chatcompletionresponse", - "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessages": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessagesTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", - "ChatModerationRequest": ".chatmoderationrequest", - "ChatModerationRequestInputs": ".chatmoderationrequest", - "ChatModerationRequestInputsTypedDict": ".chatmoderationrequest", - "ChatModerationRequestTypedDict": ".chatmoderationrequest", - "One": ".chatmoderationrequest", - "OneTypedDict": ".chatmoderationrequest", - "Two": ".chatmoderationrequest", - "TwoTypedDict": ".chatmoderationrequest", - "CheckpointOut": ".checkpointout", - "CheckpointOutTypedDict": ".checkpointout", - "ClassificationRequest": ".classificationrequest", - "ClassificationRequestInputs": ".classificationrequest", - "ClassificationRequestInputsTypedDict": ".classificationrequest", - "ClassificationRequestTypedDict": ".classificationrequest", - "ClassificationResponse": ".classificationresponse", - "ClassificationResponseTypedDict": ".classificationresponse", - "ClassificationTargetResult": ".classificationtargetresult", - "ClassificationTargetResultTypedDict": ".classificationtargetresult", - "ClassifierDetailedJobOut": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrations": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrationsTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutJobType": ".classifierdetailedjobout", - "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", - "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", - "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", - "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutModelType": 
".classifierftmodelout", - "ClassifierFTModelOutObject": ".classifierftmodelout", - "ClassifierFTModelOutTypedDict": ".classifierftmodelout", - "ClassifierJobOut": ".classifierjobout", - "ClassifierJobOutIntegrations": ".classifierjobout", - "ClassifierJobOutIntegrationsTypedDict": ".classifierjobout", - "ClassifierJobOutJobType": ".classifierjobout", - "ClassifierJobOutObject": ".classifierjobout", - "ClassifierJobOutStatus": ".classifierjobout", - "ClassifierJobOutTypedDict": ".classifierjobout", - "ClassifierTargetIn": ".classifiertargetin", - "ClassifierTargetInTypedDict": ".classifiertargetin", - "ClassifierTargetOut": ".classifiertargetout", - "ClassifierTargetOutTypedDict": ".classifiertargetout", - "ClassifierTrainingParameters": ".classifiertrainingparameters", - "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", - "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", - "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", - "CodeInterpreterTool": ".codeinterpretertool", - "CodeInterpreterToolType": ".codeinterpretertool", - "CodeInterpreterToolTypedDict": ".codeinterpretertool", - "CompletionArgs": ".completionargs", - "CompletionArgsTypedDict": ".completionargs", - "CompletionArgsStop": ".completionargsstop", - "CompletionArgsStopTypedDict": ".completionargsstop", - "CompletionChunk": ".completionchunk", - "CompletionChunkTypedDict": ".completionchunk", - "CompletionDetailedJobOut": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrations": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrationsTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutJobType": ".completiondetailedjobout", - "CompletionDetailedJobOutObject": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositories": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositoriesTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutStatus": ".completiondetailedjobout", - "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", - "CompletionEvent": ".completionevent", - "CompletionEventTypedDict": ".completionevent", - "CompletionFTModelOut": ".completionftmodelout", - "CompletionFTModelOutObject": ".completionftmodelout", - "CompletionFTModelOutTypedDict": ".completionftmodelout", - "ModelType": ".completionftmodelout", - "CompletionJobOut": ".completionjobout", - "CompletionJobOutObject": ".completionjobout", - "CompletionJobOutTypedDict": ".completionjobout", - "Integrations": ".completionjobout", - "IntegrationsTypedDict": ".completionjobout", - "JobType": ".completionjobout", - "Repositories": ".completionjobout", - "RepositoriesTypedDict": ".completionjobout", - "Status": ".completionjobout", - "CompletionResponseStreamChoice": ".completionresponsestreamchoice", - "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", - "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", - "CompletionTrainingParameters": ".completiontrainingparameters", - "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", - "CompletionTrainingParametersIn": ".completiontrainingparametersin", - "CompletionTrainingParametersInTypedDict": ".completiontrainingparametersin", - "ContentChunk": ".contentchunk", - "ContentChunkTypedDict": ".contentchunk", - "ConversationAppendRequest": ".conversationappendrequest", - "ConversationAppendRequestHandoffExecution": ".conversationappendrequest", - "ConversationAppendRequestTypedDict": 
".conversationappendrequest", - "ConversationAppendStreamRequest": ".conversationappendstreamrequest", - "ConversationAppendStreamRequestHandoffExecution": ".conversationappendstreamrequest", - "ConversationAppendStreamRequestTypedDict": ".conversationappendstreamrequest", - "ConversationEvents": ".conversationevents", - "ConversationEventsData": ".conversationevents", - "ConversationEventsDataTypedDict": ".conversationevents", - "ConversationEventsTypedDict": ".conversationevents", - "ConversationHistory": ".conversationhistory", - "ConversationHistoryObject": ".conversationhistory", - "ConversationHistoryTypedDict": ".conversationhistory", - "Entries": ".conversationhistory", - "EntriesTypedDict": ".conversationhistory", - "ConversationInputs": ".conversationinputs", - "ConversationInputsTypedDict": ".conversationinputs", - "ConversationMessages": ".conversationmessages", - "ConversationMessagesObject": ".conversationmessages", - "ConversationMessagesTypedDict": ".conversationmessages", - "AgentVersion": ".conversationrequest", - "AgentVersionTypedDict": ".conversationrequest", - "ConversationRequest": ".conversationrequest", - "ConversationRequestTypedDict": ".conversationrequest", - "HandoffExecution": ".conversationrequest", - "Tools": ".conversationrequest", - "ToolsTypedDict": ".conversationrequest", - "ConversationResponse": ".conversationresponse", - "ConversationResponseObject": ".conversationresponse", - "ConversationResponseTypedDict": ".conversationresponse", - "Outputs": ".conversationresponse", - "OutputsTypedDict": ".conversationresponse", - "ConversationRestartRequest": ".conversationrestartrequest", - "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", - "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", - "ConversationRestartRequestHandoffExecution": ".conversationrestartrequest", - "ConversationRestartRequestTypedDict": ".conversationrestartrequest", - "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestAgentVersion": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", - "ConversationStreamRequest": ".conversationstreamrequest", - "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", - "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", - "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", - "ConversationStreamRequestTools": ".conversationstreamrequest", - "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", - "ConversationStreamRequestTypedDict": ".conversationstreamrequest", - "ConversationUsageInfo": ".conversationusageinfo", - "ConversationUsageInfoTypedDict": ".conversationusageinfo", - "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", - "DeleteFileOut": ".deletefileout", - "DeleteFileOutTypedDict": ".deletefileout", - "DeleteModelOut": ".deletemodelout", - "DeleteModelOutTypedDict": ".deletemodelout", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", - "DeltaMessage": ".deltamessage", - "DeltaMessageTypedDict": ".deltamessage", - "DocumentLibraryTool": 
".documentlibrarytool", - "DocumentLibraryToolType": ".documentlibrarytool", - "DocumentLibraryToolTypedDict": ".documentlibrarytool", - "DocumentOut": ".documentout", - "DocumentOutTypedDict": ".documentout", - "DocumentTextContent": ".documenttextcontent", - "DocumentTextContentTypedDict": ".documenttextcontent", - "Attributes": ".documentupdatein", - "AttributesTypedDict": ".documentupdatein", - "DocumentUpdateIn": ".documentupdatein", - "DocumentUpdateInTypedDict": ".documentupdatein", - "DocumentURLChunk": ".documenturlchunk", - "DocumentURLChunkType": ".documenturlchunk", - "DocumentURLChunkTypedDict": ".documenturlchunk", - "EmbeddingDtype": ".embeddingdtype", - "EmbeddingRequest": ".embeddingrequest", - "EmbeddingRequestInputs": ".embeddingrequest", - "EmbeddingRequestInputsTypedDict": ".embeddingrequest", - "EmbeddingRequestTypedDict": ".embeddingrequest", - "EmbeddingResponse": ".embeddingresponse", - "EmbeddingResponseTypedDict": ".embeddingresponse", - "EmbeddingResponseData": ".embeddingresponsedata", - "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", - "EncodingFormat": ".encodingformat", - "EntityType": ".entitytype", - "EventOut": ".eventout", - "EventOutTypedDict": ".eventout", - "File": ".file", - "FileTypedDict": ".file", - "FileChunk": ".filechunk", - "FileChunkTypedDict": ".filechunk", - "FilePurpose": ".filepurpose", - "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", - "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", - "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", - "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", - "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", - "FileSchema": ".fileschema", - "FileSchemaTypedDict": ".fileschema", - "FileSignedURL": ".filesignedurl", - "FileSignedURLTypedDict": ".filesignedurl", - "FIMCompletionRequest": ".fimcompletionrequest", - "FIMCompletionRequestStop": ".fimcompletionrequest", - "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", - "FIMCompletionRequestTypedDict": ".fimcompletionrequest", - "FIMCompletionResponse": ".fimcompletionresponse", - "FIMCompletionResponseTypedDict": ".fimcompletionresponse", - "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", - "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", - "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", - "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", - "FineTuneableModelType": ".finetuneablemodeltype", - "FTClassifierLossFunction": ".ftclassifierlossfunction", - "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", - "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", - "FTModelCard": ".ftmodelcard", - "FTModelCardType": ".ftmodelcard", - "FTModelCardTypedDict": ".ftmodelcard", - "Function": ".function", - "FunctionTypedDict": ".function", - "Arguments": ".functioncall", - 
"ArgumentsTypedDict": ".functioncall", - "FunctionCall": ".functioncall", - "FunctionCallTypedDict": ".functioncall", - "FunctionCallEntry": ".functioncallentry", - "FunctionCallEntryObject": ".functioncallentry", - "FunctionCallEntryType": ".functioncallentry", - "FunctionCallEntryTypedDict": ".functioncallentry", - "FunctionCallEntryArguments": ".functioncallentryarguments", - "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", - "FunctionCallEvent": ".functioncallevent", - "FunctionCallEventType": ".functioncallevent", - "FunctionCallEventTypedDict": ".functioncallevent", - "FunctionName": ".functionname", - "FunctionNameTypedDict": ".functionname", - "FunctionResultEntry": ".functionresultentry", - "FunctionResultEntryObject": ".functionresultentry", - "FunctionResultEntryType": ".functionresultentry", - "FunctionResultEntryTypedDict": ".functionresultentry", - "FunctionTool": ".functiontool", - "FunctionToolType": ".functiontool", - "FunctionToolTypedDict": ".functiontool", - "GithubRepositoryIn": ".githubrepositoryin", - "GithubRepositoryInType": ".githubrepositoryin", - "GithubRepositoryInTypedDict": ".githubrepositoryin", - "GithubRepositoryOut": ".githubrepositoryout", - "GithubRepositoryOutType": ".githubrepositoryout", - "GithubRepositoryOutTypedDict": ".githubrepositoryout", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", - "ImageGenerationTool": ".imagegenerationtool", - "ImageGenerationToolType": ".imagegenerationtool", - "ImageGenerationToolTypedDict": ".imagegenerationtool", - "ImageURL": ".imageurl", - "ImageURLTypedDict": ".imageurl", - "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", - "ImageURLChunkTypedDict": ".imageurlchunk", - "InputEntries": ".inputentries", - "InputEntriesTypedDict": ".inputentries", - "Inputs": ".inputs", - "InputsTypedDict": ".inputs", - "InstructRequestInputs": ".inputs", - "InstructRequestInputsMessages": ".inputs", - "InstructRequestInputsMessagesTypedDict": ".inputs", - "InstructRequestInputsTypedDict": ".inputs", - "InstructRequest": ".instructrequest", - "InstructRequestMessages": ".instructrequest", - "InstructRequestMessagesTypedDict": ".instructrequest", - "InstructRequestTypedDict": ".instructrequest", - "Hyperparameters": ".jobin", - "HyperparametersTypedDict": ".jobin", - "JobIn": ".jobin", - "JobInIntegrations": ".jobin", - "JobInIntegrationsTypedDict": ".jobin", - "JobInRepositories": ".jobin", - "JobInRepositoriesTypedDict": ".jobin", - "JobInTypedDict": ".jobin", - "JobMetadataOut": ".jobmetadataout", - "JobMetadataOutTypedDict": ".jobmetadataout", - "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - 
"JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1TypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "QueryParamStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsOut": ".jobsout", - "JobsOutData": ".jobsout", - "JobsOutDataTypedDict": ".jobsout", - "JobsOutObject": ".jobsout", - "JobsOutTypedDict": ".jobsout", - "JSONSchema": ".jsonschema", - "JSONSchemaTypedDict": ".jsonschema", - "LegacyJobMetadataOut": ".legacyjobmetadataout", - "LegacyJobMetadataOutObject": ".legacyjobmetadataout", - "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", - "LibrariesDeleteV1Request": ".libraries_delete_v1op", - "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", - "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", - "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request": 
".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", - "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", - "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", - "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", - "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", - "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", - "LibrariesDocumentsUploadV1DocumentUpload": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", - "LibrariesGetV1Request": ".libraries_get_v1op", - "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", - "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", - "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", - "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", - "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", - "LibrariesShareListV1Request": ".libraries_share_list_v1op", - "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", - "LibrariesUpdateV1Request": ".libraries_update_v1op", - "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", - "LibraryIn": ".libraryin", - "LibraryInTypedDict": ".libraryin", - "LibraryInUpdate": ".libraryinupdate", - "LibraryInUpdateTypedDict": ".libraryinupdate", - "LibraryOut": ".libraryout", - "LibraryOutTypedDict": ".libraryout", - "ListDocumentOut": ".listdocumentout", - "ListDocumentOutTypedDict": ".listdocumentout", - "ListFilesOut": ".listfilesout", - "ListFilesOutTypedDict": ".listfilesout", - "ListLibraryOut": ".listlibraryout", - "ListLibraryOutTypedDict": ".listlibraryout", - "ListSharingOut": ".listsharingout", - "ListSharingOutTypedDict": ".listsharingout", - "MessageEntries": ".messageentries", - "MessageEntriesTypedDict": ".messageentries", - "MessageInputContentChunks": ".messageinputcontentchunks", - "MessageInputContentChunksTypedDict": ".messageinputcontentchunks", - "MessageInputEntry": ".messageinputentry", - "MessageInputEntryContent": ".messageinputentry", - "MessageInputEntryContentTypedDict": ".messageinputentry", - "MessageInputEntryRole": ".messageinputentry", - "MessageInputEntryType": ".messageinputentry", - "MessageInputEntryTypedDict": ".messageinputentry", - "Object": ".messageinputentry", - "MessageOutputContentChunks": ".messageoutputcontentchunks", - 
"MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", - "MessageOutputEntry": ".messageoutputentry", - "MessageOutputEntryContent": ".messageoutputentry", - "MessageOutputEntryContentTypedDict": ".messageoutputentry", - "MessageOutputEntryObject": ".messageoutputentry", - "MessageOutputEntryRole": ".messageoutputentry", - "MessageOutputEntryType": ".messageoutputentry", - "MessageOutputEntryTypedDict": ".messageoutputentry", - "MessageOutputEvent": ".messageoutputevent", - "MessageOutputEventContent": ".messageoutputevent", - "MessageOutputEventContentTypedDict": ".messageoutputevent", - "MessageOutputEventRole": ".messageoutputevent", - "MessageOutputEventType": ".messageoutputevent", - "MessageOutputEventTypedDict": ".messageoutputevent", - "MetricOut": ".metricout", - "MetricOutTypedDict": ".metricout", - "MistralPromptMode": ".mistralpromptmode", - "ModelCapabilities": ".modelcapabilities", - "ModelCapabilitiesTypedDict": ".modelcapabilities", - "ModelConversation": ".modelconversation", - "ModelConversationObject": ".modelconversation", - "ModelConversationTools": ".modelconversation", - "ModelConversationToolsTypedDict": ".modelconversation", - "ModelConversationTypedDict": ".modelconversation", - "Data": ".modellist", - "DataTypedDict": ".modellist", - "ModelList": ".modellist", - "ModelListTypedDict": ".modellist", - "ModerationObject": ".moderationobject", - "ModerationObjectTypedDict": ".moderationobject", - "ModerationResponse": ".moderationresponse", - "ModerationResponseTypedDict": ".moderationresponse", - "NoResponseError": ".no_response_error", - "OCRImageObject": ".ocrimageobject", - "OCRImageObjectTypedDict": ".ocrimageobject", - "OCRPageDimensions": ".ocrpagedimensions", - "OCRPageDimensionsTypedDict": ".ocrpagedimensions", - "OCRPageObject": ".ocrpageobject", - "OCRPageObjectTypedDict": ".ocrpageobject", - "Document": ".ocrrequest", - "DocumentTypedDict": ".ocrrequest", - "OCRRequest": ".ocrrequest", - "OCRRequestTypedDict": ".ocrrequest", - "TableFormat": ".ocrrequest", - "OCRResponse": ".ocrresponse", - "OCRResponseTypedDict": ".ocrresponse", - "Format": ".ocrtableobject", - "OCRTableObject": ".ocrtableobject", - "OCRTableObjectTypedDict": ".ocrtableobject", - "OCRUsageInfo": ".ocrusageinfo", - "OCRUsageInfoTypedDict": ".ocrusageinfo", - "OutputContentChunks": ".outputcontentchunks", - "OutputContentChunksTypedDict": ".outputcontentchunks", - "PaginationInfo": ".paginationinfo", - "PaginationInfoTypedDict": ".paginationinfo", - "Prediction": ".prediction", - "PredictionTypedDict": ".prediction", - "ProcessingStatusOut": ".processingstatusout", - "ProcessingStatusOutTypedDict": ".processingstatusout", - "RealtimeTranscriptionError": ".realtimetranscriptionerror", - "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", - "Message": ".realtimetranscriptionerrordetail", - "MessageTypedDict": ".realtimetranscriptionerrordetail", - "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", - "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", - "RealtimeTranscriptionSession": ".realtimetranscriptionsession", - "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", - "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", - "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", - "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", - "RealtimeTranscriptionSessionUpdatedTypedDict": 
".realtimetranscriptionsessionupdated", - "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", - "ReferenceChunkTypedDict": ".referencechunk", - "RequestSource": ".requestsource", - "ResponseDoneEvent": ".responsedoneevent", - "ResponseDoneEventType": ".responsedoneevent", - "ResponseDoneEventTypedDict": ".responsedoneevent", - "ResponseErrorEvent": ".responseerrorevent", - "ResponseErrorEventType": ".responseerrorevent", - "ResponseErrorEventTypedDict": ".responseerrorevent", - "ResponseFormat": ".responseformat", - "ResponseFormatTypedDict": ".responseformat", - "ResponseFormats": ".responseformats", - "ResponseStartedEvent": ".responsestartedevent", - "ResponseStartedEventType": ".responsestartedevent", - "ResponseStartedEventTypedDict": ".responsestartedevent", - "ResponseValidationError": ".responsevalidationerror", - "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveFileOut": ".retrievefileout", - "RetrieveFileOutTypedDict": ".retrievefileout", - "SampleType": ".sampletype", - "SDKError": ".sdkerror", - "Security": ".security", - "SecurityTypedDict": ".security", - "ShareEnum": ".shareenum", - "SharingDelete": ".sharingdelete", - "SharingDeleteTypedDict": ".sharingdelete", - "SharingIn": ".sharingin", - "SharingInTypedDict": ".sharingin", - "SharingOut": ".sharingout", - "SharingOutTypedDict": ".sharingout", - "Source": ".source", - "SSETypes": ".ssetypes", - "Role": ".systemmessage", - "SystemMessage": ".systemmessage", - "SystemMessageContent": ".systemmessage", - "SystemMessageContentTypedDict": ".systemmessage", - "SystemMessageTypedDict": ".systemmessage", - "SystemMessageContentChunks": ".systemmessagecontentchunks", - "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", - "TextChunk": ".textchunk", - "TextChunkType": ".textchunk", - "TextChunkTypedDict": ".textchunk", - "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", - "ThinkChunkTypedDict": ".thinkchunk", - "Thinking": ".thinkchunk", - "ThinkingTypedDict": ".thinkchunk", - "TimestampGranularity": ".timestampgranularity", - "Tool": ".tool", - "ToolTypedDict": ".tool", - "ToolCall": ".toolcall", - "ToolCallTypedDict": ".toolcall", - "ToolChoice": ".toolchoice", - "ToolChoiceTypedDict": ".toolchoice", - "ToolChoiceEnum": ".toolchoiceenum", - "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", - "ToolExecutionDoneEvent": ".toolexecutiondoneevent", - "ToolExecutionDoneEventName": ".toolexecutiondoneevent", - "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", - "ToolExecutionDoneEventType": ".toolexecutiondoneevent", - "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", - "Name": ".toolexecutionentry", - "NameTypedDict": ".toolexecutionentry", - "ToolExecutionEntry": ".toolexecutionentry", - "ToolExecutionEntryObject": ".toolexecutionentry", - "ToolExecutionEntryType": ".toolexecutionentry", - 
"ToolExecutionEntryTypedDict": ".toolexecutionentry", - "ToolExecutionStartedEvent": ".toolexecutionstartedevent", - "ToolExecutionStartedEventName": ".toolexecutionstartedevent", - "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", - "ToolExecutionStartedEventType": ".toolexecutionstartedevent", - "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", - "ToolFileChunk": ".toolfilechunk", - "ToolFileChunkTool": ".toolfilechunk", - "ToolFileChunkToolTypedDict": ".toolfilechunk", - "ToolFileChunkType": ".toolfilechunk", - "ToolFileChunkTypedDict": ".toolfilechunk", - "ToolMessage": ".toolmessage", - "ToolMessageContent": ".toolmessage", - "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", - "ToolMessageTypedDict": ".toolmessage", - "ToolReferenceChunk": ".toolreferencechunk", - "ToolReferenceChunkTool": ".toolreferencechunk", - "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", - "ToolReferenceChunkType": ".toolreferencechunk", - "ToolReferenceChunkTypedDict": ".toolreferencechunk", - "ToolTypes": ".tooltypes", - "TrainingFile": ".trainingfile", - "TrainingFileTypedDict": ".trainingfile", - "TranscriptionResponse": ".transcriptionresponse", - "TranscriptionResponseTypedDict": ".transcriptionresponse", - "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", - "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", - "Type": ".transcriptionsegmentchunk", - "TranscriptionStreamDone": ".transcriptionstreamdone", - "TranscriptionStreamDoneType": ".transcriptionstreamdone", - "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", - "TranscriptionStreamEvents": ".transcriptionstreamevents", - "TranscriptionStreamEventsData": ".transcriptionstreamevents", - "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", - "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", - "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", - "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", - "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", - "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", - "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", - "TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", - "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", - "UnarchiveFTModelOut": ".unarchiveftmodelout", - "UnarchiveFTModelOutObject": ".unarchiveftmodelout", - "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", - "UpdateFTModelIn": ".updateftmodelin", - "UpdateFTModelInTypedDict": ".updateftmodelin", - "UploadFileOut": ".uploadfileout", - "UploadFileOutTypedDict": ".uploadfileout", - "UsageInfo": ".usageinfo", - "UsageInfoTypedDict": ".usageinfo", - "UserMessage": ".usermessage", - "UserMessageContent": ".usermessage", - "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", - "UserMessageTypedDict": ".usermessage", - "Loc": ".validationerror", - "LocTypedDict": ".validationerror", - "ValidationError": ".validationerror", - "ValidationErrorTypedDict": ".validationerror", - "WandbIntegration": ".wandbintegration", - "WandbIntegrationType": ".wandbintegration", - "WandbIntegrationTypedDict": ".wandbintegration", - 
"WandbIntegrationOut": ".wandbintegrationout", - "WandbIntegrationOutType": ".wandbintegrationout", - "WandbIntegrationOutTypedDict": ".wandbintegrationout", - "WebSearchPremiumTool": ".websearchpremiumtool", - "WebSearchPremiumToolType": ".websearchpremiumtool", - "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", - "WebSearchTool": ".websearchtool", - "WebSearchToolType": ".websearchtool", - "WebSearchToolTypedDict": ".websearchtool", -} - - -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e - - -def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py deleted file mode 100644 index eb30905b..00000000 --- a/src/mistralai/models/agent.py +++ /dev/null @@ -1,142 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentToolsTypedDict = TypeAliasType( - "AgentToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -AgentTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -AgentObject = Literal["agent",] - - -class AgentTypedDict(TypedDict): - model: str - name: str - id: str - 
diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py
deleted file mode 100644
index eb30905b..00000000
--- a/src/mistralai/models/agent.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict
-from .completionargs import CompletionArgs, CompletionArgsTypedDict
-from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict
-from .functiontool import FunctionTool, FunctionToolTypedDict
-from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict
-from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict
-from .websearchtool import WebSearchTool, WebSearchToolTypedDict
-from datetime import datetime
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
-from pydantic import Discriminator, Tag, model_serializer
-from typing import Any, Dict, List, Literal, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
-
-
-AgentToolsTypedDict = TypeAliasType(
-    "AgentToolsTypedDict",
-    Union[
-        WebSearchToolTypedDict,
-        WebSearchPremiumToolTypedDict,
-        CodeInterpreterToolTypedDict,
-        ImageGenerationToolTypedDict,
-        FunctionToolTypedDict,
-        DocumentLibraryToolTypedDict,
-    ],
-)
-
-
-AgentTools = Annotated[
-    Union[
-        Annotated[CodeInterpreterTool, Tag("code_interpreter")],
-        Annotated[DocumentLibraryTool, Tag("document_library")],
-        Annotated[FunctionTool, Tag("function")],
-        Annotated[ImageGenerationTool, Tag("image_generation")],
-        Annotated[WebSearchTool, Tag("web_search")],
-        Annotated[WebSearchPremiumTool, Tag("web_search_premium")],
-    ],
-    Discriminator(lambda m: get_discriminator(m, "type", "type")),
-]
-
-
-AgentObject = Literal["agent",]
-
-
-class AgentTypedDict(TypedDict):
-    model: str
-    name: str
-    id: str
-    version: int
-    versions: List[int]
-    created_at: datetime
-    updated_at: datetime
-    deployment_chat: bool
-    source: str
-    instructions: NotRequired[Nullable[str]]
-    r"""Instruction prompt the model will follow during the conversation."""
-    tools: NotRequired[List[AgentToolsTypedDict]]
-    r"""List of tools which are available to the model during the conversation."""
-    completion_args: NotRequired[CompletionArgsTypedDict]
-    r"""White-listed arguments from the completion API"""
-    description: NotRequired[Nullable[str]]
-    handoffs: NotRequired[Nullable[List[str]]]
-    metadata: NotRequired[Nullable[Dict[str, Any]]]
-    object: NotRequired[AgentObject]
-
-
-class Agent(BaseModel):
-    model: str
-
-    name: str
-
-    id: str
-
-    version: int
-
-    versions: List[int]
-
-    created_at: datetime
-
-    updated_at: datetime
-
-    deployment_chat: bool
-
-    source: str
-
-    instructions: OptionalNullable[str] = UNSET
-    r"""Instruction prompt the model will follow during the conversation."""
-
-    tools: Optional[List[AgentTools]] = None
-    r"""List of tools which are available to the model during the conversation."""
-
-    completion_args: Optional[CompletionArgs] = None
-    r"""White-listed arguments from the completion API"""
-
-    description: OptionalNullable[str] = UNSET
-
-    handoffs: OptionalNullable[List[str]] = UNSET
-
-    metadata: OptionalNullable[Dict[str, Any]] = UNSET
-
-    object: Optional[AgentObject] = "agent"
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = [
-            "instructions",
-            "tools",
-            "completion_args",
-            "description",
-            "handoffs",
-            "metadata",
-            "object",
-        ]
-        nullable_fields = ["instructions", "description", "handoffs", "metadata"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/agentaliasresponse.py b/src/mistralai/models/agentaliasresponse.py
deleted file mode 100644
index c0928da9..00000000
--- a/src/mistralai/models/agentaliasresponse.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from datetime import datetime
-from mistralai.types import BaseModel
-from typing_extensions import TypedDict
-
-
-class AgentAliasResponseTypedDict(TypedDict):
-    alias: str
-    version: int
-    created_at: datetime
-    updated_at: datetime
-
-
-class AgentAliasResponse(BaseModel):
-    alias: str
-
-    version: int
-
-    created_at: datetime
-
-    updated_at: datetime
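Note on the `serialize_model` pattern that recurs throughout these models: `OptionalNullable` fields default to an `UNSET` sentinel rather than `None`, so the serializer can drop fields the caller never set while still sending an explicit JSON null when the caller passed `None`. A stand-alone sketch of that distinction, using a stand-in sentinel rather than `mistralai.types.UNSET`:

    UNSET = object()  # stand-in for the SDK's UNSET sentinel

    class Sketch:
        def __init__(self, description=UNSET):
            self.description = description

        def to_wire(self) -> dict:
            body = {}
            if self.description is not UNSET:
                # Explicit None survives as JSON null; unset is omitted entirely.
                body["description"] = self.description
            return body

    assert Sketch().to_wire() == {}
    assert Sketch(description=None).to_wire() == {"description": None}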
diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py
deleted file mode 100644
index 6007b571..00000000
--- a/src/mistralai/models/agentconversation.py
+++ /dev/null
@@ -1,89 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from datetime import datetime
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import Any, Dict, Literal, Optional, Union
-from typing_extensions import NotRequired, TypeAliasType, TypedDict
-
-
-AgentConversationObject = Literal["conversation",]
-
-
-AgentConversationAgentVersionTypedDict = TypeAliasType(
-    "AgentConversationAgentVersionTypedDict", Union[str, int]
-)
-
-
-AgentConversationAgentVersion = TypeAliasType(
-    "AgentConversationAgentVersion", Union[str, int]
-)
-
-
-class AgentConversationTypedDict(TypedDict):
-    id: str
-    created_at: datetime
-    updated_at: datetime
-    agent_id: str
-    name: NotRequired[Nullable[str]]
-    r"""Name given to the conversation."""
-    description: NotRequired[Nullable[str]]
-    r"""Description of what the conversation is about."""
-    metadata: NotRequired[Nullable[Dict[str, Any]]]
-    r"""Custom metadata for the conversation."""
-    object: NotRequired[AgentConversationObject]
-    agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]]
-
-
-class AgentConversation(BaseModel):
-    id: str
-
-    created_at: datetime
-
-    updated_at: datetime
-
-    agent_id: str
-
-    name: OptionalNullable[str] = UNSET
-    r"""Name given to the conversation."""
-
-    description: OptionalNullable[str] = UNSET
-    r"""Description of what the conversation is about."""
-
-    metadata: OptionalNullable[Dict[str, Any]] = UNSET
-    r"""Custom metadata for the conversation."""
-
-    object: Optional[AgentConversationObject] = "conversation"
-
-    agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["name", "description", "metadata", "object", "agent_version"]
-        nullable_fields = ["name", "description", "metadata", "agent_version"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
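Note: this is the breaking change to `agent_version` on conversations — the field is now a `Union[str, int]`. Reading an int as a pinned version number and a str as a named alias is an assumption based on the alias endpoints added elsewhere in this patch, but consumers should be prepared for either branch:

    def describe_agent_version(agent_version):
        # Assumed reading of the union: int pins a numbered agent version,
        # str refers to a named alias; None/unset follows the latest.
        if agent_version is None:
            return "latest"
        if isinstance(agent_version, int):
            return f"pinned to version {agent_version}"
        return f"following alias {agent_version!r}"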
diff --git a/src/mistralai/models/agentcreationrequest.py b/src/mistralai/models/agentcreationrequest.py
deleted file mode 100644
index 6a14201e..00000000
--- a/src/mistralai/models/agentcreationrequest.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict
-from .completionargs import CompletionArgs, CompletionArgsTypedDict
-from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict
-from .functiontool import FunctionTool, FunctionToolTypedDict
-from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict
-from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict
-from .websearchtool import WebSearchTool, WebSearchToolTypedDict
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import get_discriminator
-from pydantic import Discriminator, Tag, model_serializer
-from typing import Any, Dict, List, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
-
-
-AgentCreationRequestToolsTypedDict = TypeAliasType(
-    "AgentCreationRequestToolsTypedDict",
-    Union[
-        WebSearchToolTypedDict,
-        WebSearchPremiumToolTypedDict,
-        CodeInterpreterToolTypedDict,
-        ImageGenerationToolTypedDict,
-        FunctionToolTypedDict,
-        DocumentLibraryToolTypedDict,
-    ],
-)
-
-
-AgentCreationRequestTools = Annotated[
-    Union[
-        Annotated[CodeInterpreterTool, Tag("code_interpreter")],
-        Annotated[DocumentLibraryTool, Tag("document_library")],
-        Annotated[FunctionTool, Tag("function")],
-        Annotated[ImageGenerationTool, Tag("image_generation")],
-        Annotated[WebSearchTool, Tag("web_search")],
-        Annotated[WebSearchPremiumTool, Tag("web_search_premium")],
-    ],
-    Discriminator(lambda m: get_discriminator(m, "type", "type")),
-]
-
-
-class AgentCreationRequestTypedDict(TypedDict):
-    model: str
-    name: str
-    instructions: NotRequired[Nullable[str]]
-    r"""Instruction prompt the model will follow during the conversation."""
-    tools: NotRequired[List[AgentCreationRequestToolsTypedDict]]
-    r"""List of tools which are available to the model during the conversation."""
-    completion_args: NotRequired[CompletionArgsTypedDict]
-    r"""White-listed arguments from the completion API"""
-    description: NotRequired[Nullable[str]]
-    handoffs: NotRequired[Nullable[List[str]]]
-    metadata: NotRequired[Nullable[Dict[str, Any]]]
-
-
-class AgentCreationRequest(BaseModel):
-    model: str
-
-    name: str
-
-    instructions: OptionalNullable[str] = UNSET
-    r"""Instruction prompt the model will follow during the conversation."""
-
-    tools: Optional[List[AgentCreationRequestTools]] = None
-    r"""List of tools which are available to the model during the conversation."""
-
-    completion_args: Optional[CompletionArgs] = None
-    r"""White-listed arguments from the completion API"""
-
-    description: OptionalNullable[str] = UNSET
-
-    handoffs: OptionalNullable[List[str]] = UNSET
-
-    metadata: OptionalNullable[Dict[str, Any]] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = [
-            "instructions",
-            "tools",
-            "completion_args",
-            "description",
-            "handoffs",
-            "metadata",
-        ]
-        nullable_fields = ["instructions", "description", "handoffs", "metadata"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/agenthandoffdoneevent.py b/src/mistralai/models/agenthandoffdoneevent.py
deleted file mode 100644
index 1cdbf456..00000000
--- a/src/mistralai/models/agenthandoffdoneevent.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from datetime import datetime
-from mistralai.types import BaseModel
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-AgentHandoffDoneEventType = Literal["agent.handoff.done",]
-
-
-class AgentHandoffDoneEventTypedDict(TypedDict):
-    id: str
-    next_agent_id: str
-    next_agent_name: str
-    type: NotRequired[AgentHandoffDoneEventType]
-    created_at: NotRequired[datetime]
-    output_index: NotRequired[int]
-
-
-class AgentHandoffDoneEvent(BaseModel):
-    id: str
-
-    next_agent_id: str
-
-    next_agent_name: str
-
-    type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done"
-
-    created_at: Optional[datetime] = None
-
-    output_index: Optional[int] = 0
diff --git a/src/mistralai/models/agenthandoffentry.py b/src/mistralai/models/agenthandoffentry.py
deleted file mode 100644
index 66136256..00000000
--- a/src/mistralai/models/agenthandoffentry.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from datetime import datetime
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-AgentHandoffEntryObject = Literal["entry",]
-
-
-AgentHandoffEntryType = Literal["agent.handoff",]
-
-
-class AgentHandoffEntryTypedDict(TypedDict):
-    previous_agent_id: str
-    previous_agent_name: str
-    next_agent_id: str
-    next_agent_name: str
-    object: NotRequired[AgentHandoffEntryObject]
-    type: NotRequired[AgentHandoffEntryType]
-    created_at: NotRequired[datetime]
-    completed_at: NotRequired[Nullable[datetime]]
-    id: NotRequired[str]
-
-
-class AgentHandoffEntry(BaseModel):
-    previous_agent_id: str
-
-    previous_agent_name: str
-
-    next_agent_id: str
-
-    next_agent_name: str
-
-    object: Optional[AgentHandoffEntryObject] = "entry"
-
-    type: Optional[AgentHandoffEntryType] = "agent.handoff"
-
-    created_at: Optional[datetime] = None
-
-    completed_at: OptionalNullable[datetime] = UNSET
-
-    id: Optional[str] = None
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["object", "type", "created_at", "completed_at", "id"]
-        nullable_fields = ["completed_at"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/agenthandoffstartedevent.py b/src/mistralai/models/agenthandoffstartedevent.py
deleted file mode 100644
index 11bfa918..00000000
--- a/src/mistralai/models/agenthandoffstartedevent.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from datetime import datetime
-from mistralai.types import BaseModel
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-AgentHandoffStartedEventType = Literal["agent.handoff.started",]
-
-
-class AgentHandoffStartedEventTypedDict(TypedDict):
-    id: str
-    previous_agent_id: str
-    previous_agent_name: str
-    type: NotRequired[AgentHandoffStartedEventType]
-    created_at: NotRequired[datetime]
-    output_index: NotRequired[int]
-
-
-class AgentHandoffStartedEvent(BaseModel):
-    id: str
-
-    previous_agent_id: str
-
-    previous_agent_name: str
-
-    type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started"
-
-    created_at: Optional[datetime] = None
-
-    output_index: Optional[int] = 0
diff --git a/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py
deleted file mode 100644
index 6cf9d0e0..00000000
--- a/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict):
-    agent_id: str
-    alias: str
-    version: int
-
-
-class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel):
-    agent_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-
-    alias: Annotated[
-        str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True))
-    ]
-
-    version: Annotated[
-        int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True))
-    ]
diff --git a/src/mistralai/models/agents_api_v1_agents_deleteop.py b/src/mistralai/models/agents_api_v1_agents_deleteop.py
deleted file mode 100644
index 38e04953..00000000
--- a/src/mistralai/models/agents_api_v1_agents_deleteop.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict):
-    agent_id: str
-
-
-class AgentsAPIV1AgentsDeleteRequest(BaseModel):
-    agent_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
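Note: the create-or-update-alias request above sends `agent_id` as a path parameter and `alias`/`version` as form query parameters. A sketch of building the request model directly; in normal use the SDK method (`create_version_alias`, per the changelog) would construct this for you, and the IDs below are hypothetical:

    from mistralai.models import AgentsAPIV1AgentsCreateOrUpdateAliasRequest

    req = AgentsAPIV1AgentsCreateOrUpdateAliasRequest(
        agent_id="ag_0123456789",  # hypothetical agent ID
        alias="production",        # hypothetical alias name
        version=3,                 # agent version the alias should point at
    )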
diff --git a/src/mistralai/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/models/agents_api_v1_agents_get_versionop.py
deleted file mode 100644
index fddb10dd..00000000
--- a/src/mistralai/models/agents_api_v1_agents_get_versionop.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict):
-    agent_id: str
-    version: str
-
-
-class AgentsAPIV1AgentsGetVersionRequest(BaseModel):
-    agent_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-
-    version: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py
deleted file mode 100644
index 2b7d89a5..00000000
--- a/src/mistralai/models/agents_api_v1_agents_getop.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
-from pydantic import model_serializer
-from typing import Union
-from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
-
-
-QueryParamAgentVersionTypedDict = TypeAliasType(
-    "QueryParamAgentVersionTypedDict", Union[int, str]
-)
-
-
-QueryParamAgentVersion = TypeAliasType("QueryParamAgentVersion", Union[int, str])
-
-
-class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict):
-    agent_id: str
-    agent_version: NotRequired[Nullable[QueryParamAgentVersionTypedDict]]
-
-
-class AgentsAPIV1AgentsGetRequest(BaseModel):
-    agent_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-
-    agent_version: Annotated[
-        OptionalNullable[QueryParamAgentVersion],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["agent_version"]
-        nullable_fields = ["agent_version"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
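Note: the reworked get request above is where the breaking `agent_version` change lands for `agents.get` — the query parameter now accepts an int or a str. A sketch of the three ways to build it; the int-vs-alias interpretation and the IDs are assumptions:

    from mistralai.models import AgentsAPIV1AgentsGetRequest

    # An int pins a numbered version, a str names an alias (assumed), and
    # leaving the field UNSET defers to the server default.
    by_number = AgentsAPIV1AgentsGetRequest(agent_id="ag_0123456789", agent_version=2)
    by_alias = AgentsAPIV1AgentsGetRequest(agent_id="ag_0123456789", agent_version="production")
    latest = AgentsAPIV1AgentsGetRequest(agent_id="ag_0123456789")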
diff --git a/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py
deleted file mode 100644
index 650a7187..00000000
--- a/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict):
-    agent_id: str
-
-
-class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel):
-    agent_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
diff --git a/src/mistralai/models/agents_api_v1_agents_list_versionsop.py b/src/mistralai/models/agents_api_v1_agents_list_versionsop.py
deleted file mode 100644
index cf988b3d..00000000
--- a/src/mistralai/models/agents_api_v1_agents_list_versionsop.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
-from typing import Optional
-from typing_extensions import Annotated, NotRequired, TypedDict
-
-
-class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict):
-    agent_id: str
-    page: NotRequired[int]
-    r"""Page number (0-indexed)"""
-    page_size: NotRequired[int]
-    r"""Number of versions per page"""
-
-
-class AgentsAPIV1AgentsListVersionsRequest(BaseModel):
-    agent_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-
-    page: Annotated[
-        Optional[int],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = 0
-    r"""Page number (0-indexed)"""
-
-    page_size: Annotated[
-        Optional[int],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = 20
-    r"""Number of versions per page"""
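Note: version listing is paginated with a 0-indexed `page` and a default `page_size` of 20, per the model above. A sketch of walking the pages; the `list_versions` method name follows the op model's naming, and stopping on an empty page is an assumption since the response shape is not shown in this diff:

    import os
    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    page = 0
    while True:
        batch = client.beta.agents.list_versions(
            agent_id="ag_0123456789",  # hypothetical agent ID
            page=page,
            page_size=20,
        )
        if not batch:  # assumed sentinel for "no more pages"
            break
        page += 1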
diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py b/src/mistralai/models/agents_api_v1_agents_listop.py
deleted file mode 100644
index 88b5bad1..00000000
--- a/src/mistralai/models/agents_api_v1_agents_listop.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .requestsource import RequestSource
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import FieldMetadata, QueryParamMetadata
-from pydantic import model_serializer
-from typing import Any, Dict, List, Optional
-from typing_extensions import Annotated, NotRequired, TypedDict
-
-
-class AgentsAPIV1AgentsListRequestTypedDict(TypedDict):
-    page: NotRequired[int]
-    r"""Page number (0-indexed)"""
-    page_size: NotRequired[int]
-    r"""Number of agents per page"""
-    deployment_chat: NotRequired[Nullable[bool]]
-    sources: NotRequired[Nullable[List[RequestSource]]]
-    name: NotRequired[Nullable[str]]
-    id: NotRequired[Nullable[str]]
-    metadata: NotRequired[Nullable[Dict[str, Any]]]
-
-
-class AgentsAPIV1AgentsListRequest(BaseModel):
-    page: Annotated[
-        Optional[int],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = 0
-    r"""Page number (0-indexed)"""
-
-    page_size: Annotated[
-        Optional[int],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = 20
-    r"""Number of agents per page"""
-
-    deployment_chat: Annotated[
-        OptionalNullable[bool],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = UNSET
-
-    sources: Annotated[
-        OptionalNullable[List[RequestSource]],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = UNSET
-
-    name: Annotated[
-        OptionalNullable[str],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = UNSET
-
-    id: Annotated[
-        OptionalNullable[str],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = UNSET
-
-    metadata: Annotated[
-        OptionalNullable[Dict[str, Any]],
-        FieldMetadata(query=QueryParamMetadata(serialization="json")),
-    ] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = [
-            "page",
-            "page_size",
-            "deployment_chat",
-            "sources",
-            "name",
-            "id",
-            "metadata",
-        ]
-        nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/models/agents_api_v1_agents_update_versionop.py
deleted file mode 100644
index 5e4b97b3..00000000
--- a/src/mistralai/models/agents_api_v1_agents_update_versionop.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict):
-    agent_id: str
-    version: int
-
-
-class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel):
-    agent_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-
-    version: Annotated[
-        int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True))
-    ]
diff --git a/src/mistralai/models/agents_api_v1_agents_updateop.py b/src/mistralai/models/agents_api_v1_agents_updateop.py
deleted file mode 100644
index 32696fbe..00000000
--- a/src/mistralai/models/agents_api_v1_agents_updateop.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict):
-    agent_id: str
-    agent_update_request: AgentUpdateRequestTypedDict
-
-
-class AgentsAPIV1AgentsUpdateRequest(BaseModel):
-    agent_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-
-    agent_update_request: Annotated[
-        AgentUpdateRequest,
-        FieldMetadata(request=RequestMetadata(media_type="application/json")),
-    ]
diff --git a/src/mistralai/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/models/agents_api_v1_conversations_append_streamop.py
deleted file mode 100644
index d2489ffb..00000000
--- a/src/mistralai/models/agents_api_v1_conversations_append_streamop.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .conversationappendstreamrequest import (
-    ConversationAppendStreamRequest,
-    ConversationAppendStreamRequestTypedDict,
-)
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict):
-    conversation_id: str
-    r"""ID of the conversation to which we append entries."""
-    conversation_append_stream_request: ConversationAppendStreamRequestTypedDict
-
-
-class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel):
-    conversation_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-    r"""ID of the conversation to which we append entries."""
-
-    conversation_append_stream_request: Annotated[
-        ConversationAppendStreamRequest,
-        FieldMetadata(request=RequestMetadata(media_type="application/json")),
-    ]
diff --git a/src/mistralai/models/agents_api_v1_conversations_appendop.py b/src/mistralai/models/agents_api_v1_conversations_appendop.py
deleted file mode 100644
index ba37697e..00000000
--- a/src/mistralai/models/agents_api_v1_conversations_appendop.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .conversationappendrequest import (
-    ConversationAppendRequest,
-    ConversationAppendRequestTypedDict,
-)
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict):
-    conversation_id: str
-    r"""ID of the conversation to which we append entries."""
-    conversation_append_request: ConversationAppendRequestTypedDict
-
-
-class AgentsAPIV1ConversationsAppendRequest(BaseModel):
-    conversation_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-    r"""ID of the conversation to which we append entries."""
-
-    conversation_append_request: Annotated[
-        ConversationAppendRequest,
-        FieldMetadata(request=RequestMetadata(media_type="application/json")),
-    ]
diff --git a/src/mistralai/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/models/agents_api_v1_conversations_deleteop.py
deleted file mode 100644
index 94126cae..00000000
--- a/src/mistralai/models/agents_api_v1_conversations_deleteop.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict):
-    conversation_id: str
-    r"""ID of the conversation from which we are fetching metadata."""
-
-
-class AgentsAPIV1ConversationsDeleteRequest(BaseModel):
-    conversation_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-    r"""ID of the conversation from which we are fetching metadata."""
diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py
deleted file mode 100644
index a37a61ba..00000000
--- a/src/mistralai/models/agents_api_v1_conversations_getop.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .agentconversation import AgentConversation, AgentConversationTypedDict
-from .modelconversation import ModelConversation, ModelConversationTypedDict
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata
-from typing import Union
-from typing_extensions import Annotated, TypeAliasType, TypedDict
-
-
-class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict):
-    conversation_id: str
-    r"""ID of the conversation from which we are fetching metadata."""
-
-
-class AgentsAPIV1ConversationsGetRequest(BaseModel):
-    conversation_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-    r"""ID of the conversation from which we are fetching metadata."""
-
-
-AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType(
-    "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict",
-    Union[AgentConversationTypedDict, ModelConversationTypedDict],
-)
-r"""Successful Response"""
-
-
-AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType(
-    "AgentsAPIV1ConversationsGetResponseV1ConversationsGet",
-    Union[AgentConversation, ModelConversation],
-)
-r"""Successful Response"""
diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/models/agents_api_v1_conversations_historyop.py
deleted file mode 100644
index b8c33d1b..00000000
--- a/src/mistralai/models/agents_api_v1_conversations_historyop.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict):
-    conversation_id: str
-    r"""ID of the conversation from which we are fetching entries."""
-
-
-class AgentsAPIV1ConversationsHistoryRequest(BaseModel):
-    conversation_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-    r"""ID of the conversation from which we are fetching entries."""
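Note: the conversations get endpoint returns a union of `AgentConversation` and `ModelConversation`, per the response aliases above. A sketch of telling them apart on the client side; an `isinstance` check is enough, and the fallback branch's field assumption (`ModelConversation` carries a model rather than an agent_id) follows the model names:

    from mistralai.models import AgentConversation

    def owning_agent(conv):
        # Agent-backed conversations carry agent_id (and now agent_version);
        # model-backed conversations carry a model name instead.
        if isinstance(conv, AgentConversation):
            return conv.agent_id
        return None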
diff --git a/src/mistralai/models/agents_api_v1_conversations_listop.py b/src/mistralai/models/agents_api_v1_conversations_listop.py
deleted file mode 100644
index d314f838..00000000
--- a/src/mistralai/models/agents_api_v1_conversations_listop.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .agentconversation import AgentConversation, AgentConversationTypedDict
-from .modelconversation import ModelConversation, ModelConversationTypedDict
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from mistralai.utils import FieldMetadata, QueryParamMetadata
-from pydantic import model_serializer
-from typing import Any, Dict, Optional, Union
-from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict
-
-
-class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict):
-    page: NotRequired[int]
-    page_size: NotRequired[int]
-    metadata: NotRequired[Nullable[Dict[str, Any]]]
-
-
-class AgentsAPIV1ConversationsListRequest(BaseModel):
-    page: Annotated[
-        Optional[int],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = 0
-
-    page_size: Annotated[
-        Optional[int],
-        FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
-    ] = 100
-
-    metadata: Annotated[
-        OptionalNullable[Dict[str, Any]],
-        FieldMetadata(query=QueryParamMetadata(serialization="json")),
-    ] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["page", "page_size", "metadata"]
-        nullable_fields = ["metadata"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            )  # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
-
-
-ResponseBodyTypedDict = TypeAliasType(
-    "ResponseBodyTypedDict",
-    Union[AgentConversationTypedDict, ModelConversationTypedDict],
-)
-
-
-ResponseBody = TypeAliasType(
-    "ResponseBody", Union[AgentConversation, ModelConversation]
-)
diff --git a/src/mistralai/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/models/agents_api_v1_conversations_messagesop.py
deleted file mode 100644
index f0dac8bf..00000000
--- a/src/mistralai/models/agents_api_v1_conversations_messagesop.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from mistralai.utils import FieldMetadata, PathParamMetadata
-from typing_extensions import Annotated, TypedDict
-
-
-class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict):
-    conversation_id: str
-    r"""ID of the conversation from which we are fetching messages."""
-
-
-class AgentsAPIV1ConversationsMessagesRequest(BaseModel):
-    conversation_id: Annotated[
-        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
-    ]
-    r"""ID of the conversation from which we are fetching messages."""
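Note: conversation listing defaults to `page=0` and `page_size=100`, and `metadata` uses `serialization="json"`, so the filter dict is JSON-encoded into the query string rather than exploded into form fields. A usage sketch; the metadata keys are hypothetical:

    import os
    from mistralai import Mistral

    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

    # The metadata dict becomes a single JSON query parameter on the wire,
    # filtering server-side on stored conversation metadata.
    conversations = client.beta.conversations.list(
        page=0,
        page_size=100,
        metadata={"project": "demo"},  # hypothetical metadata filter
    )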
DO NOT EDIT.""" - -from __future__ import annotations -from .conversationrestartstreamrequest import ( - ConversationRestartStreamRequest, - ConversationRestartStreamRequestTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the original conversation which is being restarted.""" - conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict - - -class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the original conversation which is being restarted.""" - - conversation_restart_stream_request: Annotated[ - ConversationRestartStreamRequest, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/models/agents_api_v1_conversations_restartop.py deleted file mode 100644 index f706c066..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_restartop.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .conversationrestartrequest import ( - ConversationRestartRequest, - ConversationRestartRequestTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the original conversation which is being restarted.""" - conversation_restart_request: ConversationRestartRequestTypedDict - - -class AgentsAPIV1ConversationsRestartRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the original conversation which is being restarted.""" - - conversation_restart_request: Annotated[ - ConversationRestartRequest, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py deleted file mode 100644 index cc07a6bd..00000000 --- a/src/mistralai/models/agentscompletionrequest.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentsCompletionRequestStopTypedDict = TypeAliasType( - "AgentsCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionRequestStop = TypeAliasType( - "AgentsCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -AgentsCompletionRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -AgentsCompletionRequestToolChoiceTypedDict = TypeAliasType( - "AgentsCompletionRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -AgentsCompletionRequestToolChoice = TypeAliasType( - "AgentsCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class AgentsCompletionRequestTypedDict(TypedDict): - messages: List[AgentsCompletionRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - agent_id: str - r"""The ID of the agent to use for this completion.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[AgentsCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - response_format: NotRequired[ResponseFormatTypedDict] - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - parallel_tool_calls: NotRequired[bool] - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - -class AgentsCompletionRequest(BaseModel): - messages: List[AgentsCompletionRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - agent_id: str - r"""The ID of the agent to use for this completion.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[AgentsCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - response_format: Optional[ResponseFormat] = None - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[AgentsCompletionRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - parallel_tool_calls: Optional[bool] = None - - prompt_mode: OptionalNullable[MistralPromptMode] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py deleted file mode 100644 index d6a887be..00000000 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ /dev/null @@ -1,190 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
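The `AgentsCompletionRequestMessages` union above dispatches on each message's `role` via a callable `Discriminator`. A hedged sketch of that dispatch, assuming pydantic's `TypeAdapter` resolves the annotated union and that the alias and `UserMessage` are re-exported from `mistralai.models`:

```python
# Sketch: role-based dispatch of a plain dict to a concrete message model.
from pydantic import TypeAdapter
from mistralai.models import AgentsCompletionRequestMessages, UserMessage

adapter = TypeAdapter(AgentsCompletionRequestMessages)
msg = adapter.validate_python({"role": "user", "content": "Ping?"})
# "user" matches Tag("user"), so validation yields a UserMessage instance.
assert isinstance(msg, UserMessage)
```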
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentsCompletionStreamRequestStopTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionStreamRequestStop = TypeAliasType( - "AgentsCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -AgentsCompletionStreamRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -AgentsCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -AgentsCompletionStreamRequestToolChoice = TypeAliasType( - "AgentsCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class AgentsCompletionStreamRequestTypedDict(TypedDict): - messages: List[AgentsCompletionStreamRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - agent_id: str - r"""The ID of the agent to use for this completion.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - response_format: NotRequired[ResponseFormatTypedDict] - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - parallel_tool_calls: NotRequired[bool] - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - -class AgentsCompletionStreamRequest(BaseModel): - messages: List[AgentsCompletionStreamRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - agent_id: str - r"""The ID of the agent to use for this completion.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[AgentsCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - response_format: Optional[ResponseFormat] = None - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - parallel_tool_calls: Optional[bool] = None - - prompt_mode: OptionalNullable[MistralPromptMode] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentupdaterequest.py b/src/mistralai/models/agentupdaterequest.py deleted file mode 100644 index e496907c..00000000 --- a/src/mistralai/models/agentupdaterequest.py +++ /dev/null @@ -1,127 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentUpdateRequestToolsTypedDict = TypeAliasType( - "AgentUpdateRequestToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -AgentUpdateRequestTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class AgentUpdateRequestTypedDict(TypedDict): - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - model: NotRequired[Nullable[str]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - handoffs: NotRequired[Nullable[List[str]]] - deployment_chat: NotRequired[Nullable[bool]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - - -class AgentUpdateRequest(BaseModel): - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[AgentUpdateRequestTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - model: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - handoffs: OptionalNullable[List[str]] = UNSET - - deployment_chat: OptionalNullable[bool] = UNSET - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "model", - "name", - "description", - "handoffs", - "deployment_chat", - "metadata", - ] - nullable_fields = [ - "instructions", - "model", - "name", - "description", - "handoffs", - "deployment_chat", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = 
serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/apiendpoint.py b/src/mistralai/models/apiendpoint.py deleted file mode 100644 index 0ad9366f..00000000 --- a/src/mistralai/models/apiendpoint.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -APIEndpoint = Union[ - Literal[ - "/v1/chat/completions", - "/v1/embeddings", - "/v1/fim/completions", - "/v1/moderations", - "/v1/chat/moderations", - "/v1/ocr", - "/v1/classifications", - "/v1/chat/classifications", - "/v1/conversations", - "/v1/audio/transcriptions", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py deleted file mode 100644 index 0f753cfc..00000000 --- a/src/mistralai/models/archiveftmodelout.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ArchiveFTModelOutObject = Literal["model",] - - -class ArchiveFTModelOutTypedDict(TypedDict): - id: str - object: NotRequired[ArchiveFTModelOutObject] - archived: NotRequired[bool] - - -class ArchiveFTModelOut(BaseModel): - id: str - - object: Optional[ArchiveFTModelOutObject] = "model" - - archived: Optional[bool] = True diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py deleted file mode 100644 index a38a10c4..00000000 --- a/src/mistralai/models/assistantmessage.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AssistantMessageContentTypedDict = TypeAliasType( - "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -AssistantMessageContent = TypeAliasType( - "AssistantMessageContent", Union[str, List[ContentChunk]] -) - - -AssistantMessageRole = Literal["assistant",] - - -class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[AssistantMessageContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer with the content of the message.""" - role: NotRequired[AssistantMessageRole] - - -class AssistantMessage(BaseModel): - content: OptionalNullable[AssistantMessageContent] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer with the content of the message.""" - - role: Optional[AssistantMessageRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/audiochunk.py b/src/mistralai/models/audiochunk.py deleted file mode 100644 index 64fc43ff..00000000 --- a/src/mistralai/models/audiochunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AudioChunkType = Literal["input_audio",] - - -class AudioChunkTypedDict(TypedDict): - input_audio: str - type: NotRequired[AudioChunkType] - - -class AudioChunk(BaseModel): - input_audio: str - - type: Optional[AudioChunkType] = "input_audio" diff --git a/src/mistralai/models/audioencoding.py b/src/mistralai/models/audioencoding.py deleted file mode 100644 index 13eb6d15..00000000 --- a/src/mistralai/models/audioencoding.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -AudioEncoding = Union[ - Literal[ - "pcm_s16le", - "pcm_s32le", - "pcm_f16le", - "pcm_f32le", - "pcm_mulaw", - "pcm_alaw", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/audioformat.py b/src/mistralai/models/audioformat.py deleted file mode 100644 index 48ab648c..00000000 --- a/src/mistralai/models/audioformat.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
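For the `prefix` flag on `AssistantMessage` above, a short hedged sketch of the intended call shape (plain dicts shown; the generated SDK accepts either the TypedDicts or the model instances):

```python
# Sketch: condition the reply so it must begin with the supplied prefix.
messages = [
    {"role": "user", "content": "Answer strictly as JSON."},
    # prefix=True forces the model to start its answer with this content.
    {"role": "assistant", "content": "{\"answer\": ", "prefix": True},
]
```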
DO NOT EDIT.""" - -from __future__ import annotations -from .audioencoding import AudioEncoding -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class AudioFormatTypedDict(TypedDict): - encoding: AudioEncoding - sample_rate: int - - -class AudioFormat(BaseModel): - encoding: AudioEncoding - - sample_rate: int diff --git a/src/mistralai/models/audiotranscriptionrequest.py b/src/mistralai/models/audiotranscriptionrequest.py deleted file mode 100644 index 86417b42..00000000 --- a/src/mistralai/models/audiotranscriptionrequest.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .timestampgranularity import TimestampGranularity -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AudioTranscriptionRequestTypedDict(TypedDict): - model: str - r"""ID of the model to be used.""" - file: NotRequired[FileTypedDict] - file_url: NotRequired[Nullable[str]] - r"""Url of a file to be transcribed""" - file_id: NotRequired[Nullable[str]] - r"""ID of a file uploaded to /v1/files""" - language: NotRequired[Nullable[str]] - r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" - temperature: NotRequired[Nullable[float]] - stream: Literal[False] - diarize: NotRequired[bool] - context_bias: NotRequired[List[str]] - timestamp_granularities: NotRequired[List[TimestampGranularity]] - r"""Granularities of timestamps to include in the response.""" - - -class AudioTranscriptionRequest(BaseModel): - model: Annotated[str, FieldMetadata(multipart=True)] - r"""ID of the model to be used.""" - - file: Annotated[ - Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) - ] = None - - file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Url of a file to be transcribed""" - - file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""ID of a file uploaded to /v1/files""" - - language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" - - temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( - UNSET - ) - - STREAM: Annotated[ - Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], - pydantic.Field(alias="stream"), - FieldMetadata(multipart=True), - ] = False - - diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False - - context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None - - timestamp_granularities: Annotated[ - Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) - ] = None - r"""Granularities of timestamps to include in the response.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "diarize", - "context_bias", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/audiotranscriptionrequeststream.py b/src/mistralai/models/audiotranscriptionrequeststream.py deleted file mode 100644 index 1f4087e8..00000000 --- a/src/mistralai/models/audiotranscriptionrequeststream.py +++ /dev/null @@ -1,105 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .timestampgranularity import TimestampGranularity -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AudioTranscriptionRequestStreamTypedDict(TypedDict): - model: str - file: NotRequired[FileTypedDict] - file_url: NotRequired[Nullable[str]] - r"""Url of a file to be transcribed""" - file_id: NotRequired[Nullable[str]] - r"""ID of a file uploaded to /v1/files""" - language: NotRequired[Nullable[str]] - r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" - temperature: NotRequired[Nullable[float]] - stream: Literal[True] - diarize: NotRequired[bool] - context_bias: NotRequired[List[str]] - timestamp_granularities: NotRequired[List[TimestampGranularity]] - r"""Granularities of timestamps to include in the response.""" - - -class AudioTranscriptionRequestStream(BaseModel): - model: Annotated[str, FieldMetadata(multipart=True)] - - file: Annotated[ - Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) - ] = None - - file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Url of a file to be transcribed""" - - file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""ID of a file uploaded to /v1/files""" - - language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" - - temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( - UNSET - ) - - STREAM: Annotated[ - Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], - pydantic.Field(alias="stream"), - FieldMetadata(multipart=True), - ] = True - - diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False - - context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None - - timestamp_granularities: Annotated[ - Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) - ] = None - r"""Granularities of timestamps to include in the response.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "diarize", - "context_bias", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py deleted file mode 100644 index 706841b7..00000000 --- a/src/mistralai/models/basemodelcard.py +++ /dev/null @@ -1,110 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -BaseModelCardType = Literal["base",] - - -class BaseModelCardTypedDict(TypedDict): - id: str - capabilities: ModelCapabilitiesTypedDict - object: NotRequired[str] - created: NotRequired[int] - owned_by: NotRequired[str] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - deprecation: NotRequired[Nullable[datetime]] - deprecation_replacement_model: NotRequired[Nullable[str]] - default_model_temperature: NotRequired[Nullable[float]] - type: BaseModelCardType - - -class BaseModelCard(BaseModel): - id: str - - capabilities: ModelCapabilities - - object: Optional[str] = "model" - - created: Optional[int] = None - - owned_by: Optional[str] = "mistralai" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - deprecation: OptionalNullable[datetime] = UNSET - - deprecation_replacement_model: OptionalNullable[str] = UNSET - - default_model_temperature: OptionalNullable[float] = UNSET - - TYPE: Annotated[ - Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], - pydantic.Field(alias="type"), - ] = "base" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - "type", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batcherror.py b/src/mistralai/models/batcherror.py deleted file mode 100644 index 4f823446..00000000 --- a/src/mistralai/models/batcherror.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class BatchErrorTypedDict(TypedDict): - message: str - count: NotRequired[int] - - -class BatchError(BaseModel): - message: str - - count: Optional[int] = 1 diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py deleted file mode 100644 index 839a9b3c..00000000 --- a/src/mistralai/models/batchjobin.py +++ /dev/null @@ -1,82 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .apiendpoint import APIEndpoint -from .batchrequest import BatchRequest, BatchRequestTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Dict, List, Optional -from typing_extensions import NotRequired, TypedDict - - -class BatchJobInTypedDict(TypedDict): - endpoint: APIEndpoint - input_files: NotRequired[Nullable[List[str]]] - r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" - requests: NotRequired[Nullable[List[BatchRequestTypedDict]]] - model: NotRequired[Nullable[str]] - r"""The model to be used for batch inference.""" - agent_id: NotRequired[Nullable[str]] - r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" - metadata: NotRequired[Nullable[Dict[str, str]]] - r"""The metadata of your choice to be associated with the batch inference job.""" - timeout_hours: NotRequired[int] - r"""The timeout in hours for the batch inference job.""" - - -class BatchJobIn(BaseModel): - endpoint: APIEndpoint - - input_files: OptionalNullable[List[str]] = UNSET - r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" - - requests: OptionalNullable[List[BatchRequest]] = UNSET - - model: OptionalNullable[str] = UNSET - r"""The model to be used for batch inference.""" - - agent_id: OptionalNullable[str] = UNSET - r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" - - metadata: OptionalNullable[Dict[str, str]] = UNSET - r"""The metadata of your choice to be associated with the batch inference job.""" - - timeout_hours: Optional[int] = 24 - r"""The timeout in hours for the batch inference job.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "input_files", - "requests", - "model", - "agent_id", - "metadata", - "timeout_hours", - ] - nullable_fields = ["input_files", "requests", "model", "agent_id", "metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py deleted file mode 100644 index 904cd349..00000000 --- a/src/mistralai/models/batchjobout.py +++ /dev/null @@ -1,123 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
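To make the `input_files` contract above concrete: a hedged sketch that writes the documented `jsonl` payload (the file name is illustrative). Each line's `body` must match the request schema of the chosen `endpoint`, here `/v1/chat/completions`:

```python
import json

rows = [
    {"custom_id": "0", "body": {"max_tokens": 100,
     "messages": [{"role": "user", "content": "What is the best French cheese?"}]}},
    {"custom_id": "1", "body": {"max_tokens": 100,
     "messages": [{"role": "user", "content": "What is the best French wine?"}]}},
]
# One JSON object per line, mirroring the docstring's example payloads.
with open("batch_input.jsonl", "w", encoding="utf-8") as f:
    for row in rows:
        f.write(json.dumps(row) + "\n")
```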
DO NOT EDIT.""" - -from __future__ import annotations -from .batcherror import BatchError, BatchErrorTypedDict -from .batchjobstatus import BatchJobStatus -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobOutObject = Literal["batch",] - - -class BatchJobOutTypedDict(TypedDict): - id: str - input_files: List[str] - endpoint: str - errors: List[BatchErrorTypedDict] - status: BatchJobStatus - created_at: int - total_requests: int - completed_requests: int - succeeded_requests: int - failed_requests: int - object: NotRequired[BatchJobOutObject] - metadata: NotRequired[Nullable[Dict[str, Any]]] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - output_file: NotRequired[Nullable[str]] - error_file: NotRequired[Nullable[str]] - outputs: NotRequired[Nullable[List[Dict[str, Any]]]] - started_at: NotRequired[Nullable[int]] - completed_at: NotRequired[Nullable[int]] - - -class BatchJobOut(BaseModel): - id: str - - input_files: List[str] - - endpoint: str - - errors: List[BatchError] - - status: BatchJobStatus - - created_at: int - - total_requests: int - - completed_requests: int - - succeeded_requests: int - - failed_requests: int - - object: Optional[BatchJobOutObject] = "batch" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - model: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - output_file: OptionalNullable[str] = UNSET - - error_file: OptionalNullable[str] = UNSET - - outputs: OptionalNullable[List[Dict[str, Any]]] = UNSET - - started_at: OptionalNullable[int] = UNSET - - completed_at: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "outputs", - "started_at", - "completed_at", - ] - nullable_fields = [ - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "outputs", - "started_at", - "completed_at", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py deleted file mode 100644 index a1eba5db..00000000 --- a/src/mistralai/models/batchjobsout.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .batchjobout import BatchJobOut, BatchJobOutTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobsOutObject = Literal["list",] - - -class BatchJobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[BatchJobOutTypedDict]] - object: NotRequired[BatchJobsOutObject] - - -class BatchJobsOut(BaseModel): - total: int - - data: Optional[List[BatchJobOut]] = None - - object: Optional[BatchJobsOutObject] = "list" diff --git a/src/mistralai/models/batchjobstatus.py b/src/mistralai/models/batchjobstatus.py deleted file mode 100644 index 4b28059b..00000000 --- a/src/mistralai/models/batchjobstatus.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -BatchJobStatus = Literal[ - "QUEUED", - "RUNNING", - "SUCCESS", - "FAILED", - "TIMEOUT_EXCEEDED", - "CANCELLATION_REQUESTED", - "CANCELLED", -] diff --git a/src/mistralai/models/batchrequest.py b/src/mistralai/models/batchrequest.py deleted file mode 100644 index 3d1e98f7..00000000 --- a/src/mistralai/models/batchrequest.py +++ /dev/null @@ -1,48 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict -from typing_extensions import NotRequired, TypedDict - - -class BatchRequestTypedDict(TypedDict): - body: Dict[str, Any] - custom_id: NotRequired[Nullable[str]] - - -class BatchRequest(BaseModel): - body: Dict[str, Any] - - custom_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["custom_id"] - nullable_fields = ["custom_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/builtinconnectors.py b/src/mistralai/models/builtinconnectors.py deleted file mode 100644 index 6a3b2476..00000000 --- a/src/mistralai/models/builtinconnectors.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -BuiltInConnectors = Literal[ - "web_search", - "web_search_premium", - "code_interpreter", - "image_generation", - "document_library", -] diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py deleted file mode 100644 index f06f4f34..00000000 --- a/src/mistralai/models/chatclassificationrequest.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .inputs import Inputs, InputsTypedDict -from mistralai.types import BaseModel -import pydantic -from typing_extensions import Annotated, TypedDict - - -class ChatClassificationRequestTypedDict(TypedDict): - model: str - inputs: InputsTypedDict - r"""Chat to classify""" - - -class ChatClassificationRequest(BaseModel): - model: str - - inputs: Annotated[Inputs, pydantic.Field(alias="input")] - r"""Chat to classify""" diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py deleted file mode 100644 index f2057ab4..00000000 --- a/src/mistralai/models/chatcompletionchoice.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai.types import BaseModel, UnrecognizedStr -from typing import Literal, Union -from typing_extensions import TypedDict - - -FinishReason = Union[ - Literal[ - "stop", - "length", - "model_length", - "error", - "tool_calls", - ], - UnrecognizedStr, -] - - -class ChatCompletionChoiceTypedDict(TypedDict): - index: int - message: AssistantMessageTypedDict - finish_reason: FinishReason - - -class ChatCompletionChoice(BaseModel): - index: int - - message: AssistantMessage - - finish_reason: FinishReason diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py deleted file mode 100644 index ad8b5428..00000000 --- a/src/mistralai/models/chatcompletionrequest.py +++ /dev/null @@ -1,215 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -Stop = TypeAliasType("Stop", Union[str, List[str]]) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Messages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) -r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - -ChatCompletionRequestToolChoice = TypeAliasType( - "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) -r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - -class ChatCompletionRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[MessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[StopTypedDict] - r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - response_format: NotRequired[ResponseFormatTypedDict] - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - tools: NotRequired[Nullable[List[ToolTypedDict]]] - r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" - tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] - r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - presence_penalty: NotRequired[float] - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - parallel_tool_calls: NotRequired[bool] - r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - safe_prompt: NotRequired[bool] - r"""Whether to inject a safety prompt before all conversations.""" - - -class ChatCompletionRequest(BaseModel): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - - messages: List[Messages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[Stop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - response_format: Optional[ResponseFormat] = None - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - tools: OptionalNullable[List[Tool]] = UNSET - r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" - - tool_choice: Optional[ChatCompletionRequestToolChoice] = None - r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - presence_penalty: Optional[float] = None - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - parallel_tool_calls: Optional[bool] = None - r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - - prompt_mode: OptionalNullable[MistralPromptMode] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - safe_prompt: Optional[bool] = None - r"""Whether to inject a safety prompt before all conversations.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py deleted file mode 100644 index 3d03b126..00000000 --- a/src/mistralai/models/chatcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ChatCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: int - choices: List[ChatCompletionChoiceTypedDict] - - -class ChatCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: int - - choices: List[ChatCompletionChoice] diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py deleted file mode 100644 index 10f97e5f..00000000 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ /dev/null @@ -1,217 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
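# Sketch of how the `Messages` annotation above dispatches plain dicts: the
# Discriminator callable reads each item's `role`, and the pydantic tags
# route it to AssistantMessage, SystemMessage, ToolMessage, or UserMessage.
# Request values are invented for illustration.
from mistralai.models import ChatCompletionRequest

req = ChatCompletionRequest.model_validate({
    "model": "mistral-small-latest",
    "messages": [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Ping?"},
    ],
})
print(type(req.messages[1]).__name__)  # UserMessage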
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ChatCompletionStreamRequestStopTypedDict = TypeAliasType( - "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionStreamRequestStop = TypeAliasType( - "ChatCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionStreamRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -ChatCompletionStreamRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionStreamRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) -r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - -ChatCompletionStreamRequestToolChoice = TypeAliasType( - "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) -r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - -class ChatCompletionStreamRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionStreamRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - response_format: NotRequired[ResponseFormatTypedDict] - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - tools: NotRequired[Nullable[List[ToolTypedDict]]] - r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" - tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] - r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - presence_penalty: NotRequired[float] - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - parallel_tool_calls: NotRequired[bool] - r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - safe_prompt: NotRequired[bool] - r"""Whether to inject a safety prompt before all conversations.""" - - -class ChatCompletionStreamRequest(BaseModel): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - - messages: List[ChatCompletionStreamRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[ChatCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - response_format: Optional[ResponseFormat] = None - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - tools: OptionalNullable[List[Tool]] = UNSET - r"""A list of tools the model may call. 
Use this to provide a list of functions the model may generate JSON inputs for.""" - - tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - presence_penalty: Optional[float] = None - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - parallel_tool_calls: Optional[bool] = None - r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - - prompt_mode: OptionalNullable[MistralPromptMode] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - safe_prompt: Optional[bool] = None - r"""Whether to inject a safety prompt before all conversations.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/chatmoderationrequest.py b/src/mistralai/models/chatmoderationrequest.py deleted file mode 100644 index 2f58d52f..00000000 --- a/src/mistralai/models/chatmoderationrequest.py +++ /dev/null @@ -1,83 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
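# Worth noting across the two removed request models: ChatCompletionRequest
# defaults `stream` to False, while ChatCompletionStreamRequest defaults it
# to True, so the streaming variant serializes `"stream": true` even when
# the caller never sets it. A short sketch under the pre-removal layout,
# with a hypothetical model id:
from mistralai.models import ChatCompletionStreamRequest

req = ChatCompletionStreamRequest(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Stream this."}],
)
print(req.stream)                             # True
print(req.model_dump(mode="json")["stream"])  # True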
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -import pydantic -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -TwoTypedDict = TypeAliasType( - "TwoTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Two = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -OneTypedDict = TypeAliasType( - "OneTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -One = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatModerationRequestInputsTypedDict = TypeAliasType( - "ChatModerationRequestInputsTypedDict", - Union[List[OneTypedDict], List[List[TwoTypedDict]]], -) -r"""Chat to classify""" - - -ChatModerationRequestInputs = TypeAliasType( - "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] -) -r"""Chat to classify""" - - -class ChatModerationRequestTypedDict(TypedDict): - inputs: ChatModerationRequestInputsTypedDict - r"""Chat to classify""" - model: str - - -class ChatModerationRequest(BaseModel): - inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] - r"""Chat to classify""" - - model: str diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py deleted file mode 100644 index aefb7731..00000000 --- a/src/mistralai/models/checkpointout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .metricout import MetricOut, MetricOutTypedDict -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class CheckpointOutTypedDict(TypedDict): - metrics: MetricOutTypedDict - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - step_number: int - r"""The step number that the checkpoint was created at.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" - - -class CheckpointOut(BaseModel): - metrics: MetricOut - r"""Metrics at the step number during the fine-tuning job. 
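# The ChatModerationRequest removed just above accepted either one
# conversation (List[One]) or a batch of conversations (List[List[Two]]),
# both under the wire alias `input`. A sketch of the batched form with
# invented messages:
from mistralai.models import ChatModerationRequest

req = ChatModerationRequest.model_validate({
    "model": "mistral-moderation-latest",
    "input": [[
        {"role": "user", "content": "Is this fine?"},
        {"role": "assistant", "content": "Yes."},
    ]],
})
print(len(req.inputs))  # 1 conversation in the batch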
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - step_number: int - r"""The step number that the checkpoint was created at.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py deleted file mode 100644 index 8a354378..00000000 --- a/src/mistralai/models/classificationrequest.py +++ /dev/null @@ -1,68 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, List, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ClassificationRequestInputsTypedDict = TypeAliasType( - "ClassificationRequestInputsTypedDict", Union[str, List[str]] -) -r"""Text to classify.""" - - -ClassificationRequestInputs = TypeAliasType( - "ClassificationRequestInputs", Union[str, List[str]] -) -r"""Text to classify.""" - - -class ClassificationRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use.""" - inputs: ClassificationRequestInputsTypedDict - r"""Text to classify.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - - -class ClassificationRequest(BaseModel): - model: str - r"""ID of the model to use.""" - - inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] - r"""Text to classify.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classificationresponse.py b/src/mistralai/models/classificationresponse.py deleted file mode 100644 index b7741f37..00000000 --- a/src/mistralai/models/classificationresponse.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
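# Reading sketch for the classification models deleted here: `results` holds
# one dict per input, keyed by classifier target, each wrapping a `scores`
# mapping of label -> probability. All values below are invented.
from mistralai.models import ClassificationResponse

resp = ClassificationResponse.model_validate({
    "id": "clf-123",
    "model": "mistral-moderation-latest",
    "results": [{"toxicity": {"scores": {"clean": 0.97, "toxic": 0.03}}}],
})
print(resp.results[0]["toxicity"].scores["toxic"])  # 0.03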
DO NOT EDIT.""" - -from __future__ import annotations -from .classificationtargetresult import ( - ClassificationTargetResult, - ClassificationTargetResultTypedDict, -) -from mistralai.types import BaseModel -from typing import Dict, List -from typing_extensions import TypedDict - - -class ClassificationResponseTypedDict(TypedDict): - id: str - model: str - results: List[Dict[str, ClassificationTargetResultTypedDict]] - - -class ClassificationResponse(BaseModel): - id: str - - model: str - - results: List[Dict[str, ClassificationTargetResult]] diff --git a/src/mistralai/models/classificationtargetresult.py b/src/mistralai/models/classificationtargetresult.py deleted file mode 100644 index 60c5a51b..00000000 --- a/src/mistralai/models/classificationtargetresult.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Dict -from typing_extensions import TypedDict - - -class ClassificationTargetResultTypedDict(TypedDict): - scores: Dict[str, float] - - -class ClassificationTargetResult(BaseModel): - scores: Dict[str, float] diff --git a/src/mistralai/models/classifierdetailedjobout.py b/src/mistralai/models/classifierdetailedjobout.py deleted file mode 100644 index 701aee6e..00000000 --- a/src/mistralai/models/classifierdetailedjobout.py +++ /dev/null @@ -1,158 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] - - -ClassifierDetailedJobOutObject = Literal["job",] - - -ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -ClassifierDetailedJobOutIntegrations = WandbIntegrationOut - - -ClassifierDetailedJobOutJobType = Literal["classifier",] - - -class ClassifierDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: ClassifierDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: ClassifierTrainingParametersTypedDict - classifier_targets: List[ClassifierTargetOutTypedDict] - validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[ClassifierDetailedJobOutObject] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] - ] - trained_tokens: NotRequired[Nullable[int]] - metadata: 
NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[ClassifierDetailedJobOutJobType] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class ClassifierDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: ClassifierDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: ClassifierTrainingParameters - - classifier_targets: List[ClassifierTargetOut] - - validation_files: OptionalNullable[List[str]] = UNSET - - object: Optional[ClassifierDetailedJobOutObject] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py deleted file mode 100644 index d2a31fae..00000000 --- a/src/mistralai/models/classifierftmodelout.py +++ /dev/null @@ -1,108 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, -) -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierFTModelOutObject = Literal["model",] - - -ClassifierFTModelOutModelType = Literal["classifier",] - - -class ClassifierFTModelOutTypedDict(TypedDict): - id: str - created: int - owned_by: str - workspace_id: str - root: str - root_version: str - archived: bool - capabilities: FTModelCapabilitiesOutTypedDict - job: str - classifier_targets: List[ClassifierTargetOutTypedDict] - object: NotRequired[ClassifierFTModelOutObject] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - model_type: NotRequired[ClassifierFTModelOutModelType] - - -class ClassifierFTModelOut(BaseModel): - id: str - - created: int - - owned_by: str - - workspace_id: str - - root: str - - root_version: str - - archived: bool - - capabilities: FTModelCapabilitiesOut - - job: str - - classifier_targets: List[ClassifierTargetOut] - - object: Optional[ClassifierFTModelOutObject] = "model" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - model_type: Optional[ClassifierFTModelOutModelType] = "classifier" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - "model_type", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifierjobout.py b/src/mistralai/models/classifierjobout.py deleted file mode 100644 index a2f7cc08..00000000 --- a/src/mistralai/models/classifierjobout.py +++ /dev/null @@ -1,167 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current status of the fine-tuning job.""" - - -ClassifierJobOutObject = Literal["job",] -r"""The object type of the fine-tuning job.""" - - -ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -ClassifierJobOutIntegrations = WandbIntegrationOut - - -ClassifierJobOutJobType = Literal["classifier",] -r"""The type of job (`FT` for fine-tuning).""" - - -class ClassifierJobOutTypedDict(TypedDict): - id: str - r"""The ID of the job.""" - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: ClassifierJobOutStatus - r"""The current status of the fine-tuning job.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - hyperparameters: ClassifierTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[ClassifierJobOutObject] - r"""The object type of the fine-tuning job.""" - fine_tuned_model: NotRequired[Nullable[str]] - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] - r"""A list of integrations enabled for your fine-tuning job.""" - trained_tokens: NotRequired[Nullable[int]] - r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[ClassifierJobOutJobType] - r"""The type of job (`FT` for fine-tuning).""" - - -class ClassifierJobOut(BaseModel): - id: str - r"""The ID of the job.""" - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: ClassifierJobOutStatus - r"""The current status of the fine-tuning job.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - - hyperparameters: ClassifierTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data.""" - - object: Optional[ClassifierJobOutObject] = "job" - r"""The object type of the fine-tuning job.""" - - fine_tuned_model: OptionalNullable[str] = UNSET - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET - r"""A list of integrations enabled for your fine-tuning job.""" - - trained_tokens: OptionalNullable[int] = UNSET - r"""Total number of tokens trained.""" - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[ClassifierJobOutJobType] = "classifier" - r"""The type of job (`FT` for fine-tuning).""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertargetin.py b/src/mistralai/models/classifiertargetin.py deleted file mode 100644 index d8a060e4..00000000 --- a/src/mistralai/models/classifiertargetin.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
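# ClassifierTargetIn (removed just below) defaults `weight` to 1 and leaves
# `loss_function` UNSET, so an untouched loss function disappears from the
# payload entirely while the weight is always emitted. Sketch under those
# assumptions:
from mistralai.models import ClassifierTargetIn

target = ClassifierTargetIn(name="sentiment", labels=["pos", "neg"])
print(target.model_dump(mode="json"))
# -> {'name': 'sentiment', 'labels': ['pos', 'neg'], 'weight': 1.0}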
DO NOT EDIT.""" - -from __future__ import annotations -from .ftclassifierlossfunction import FTClassifierLossFunction -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTargetInTypedDict(TypedDict): - name: str - labels: List[str] - weight: NotRequired[float] - loss_function: NotRequired[Nullable[FTClassifierLossFunction]] - - -class ClassifierTargetIn(BaseModel): - name: str - - labels: List[str] - - weight: Optional[float] = 1 - - loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["weight", "loss_function"] - nullable_fields = ["loss_function"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertargetout.py b/src/mistralai/models/classifiertargetout.py deleted file mode 100644 index ddc587f4..00000000 --- a/src/mistralai/models/classifiertargetout.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .ftclassifierlossfunction import FTClassifierLossFunction -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ClassifierTargetOutTypedDict(TypedDict): - name: str - labels: List[str] - weight: float - loss_function: FTClassifierLossFunction - - -class ClassifierTargetOut(BaseModel): - name: str - - labels: List[str] - - weight: float - - loss_function: FTClassifierLossFunction diff --git a/src/mistralai/models/classifiertrainingparameters.py b/src/mistralai/models/classifiertrainingparameters.py deleted file mode 100644 index 718beeac..00000000 --- a/src/mistralai/models/classifiertrainingparameters.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTrainingParametersTypedDict(TypedDict): - training_steps: NotRequired[Nullable[int]] - learning_rate: NotRequired[float] - weight_decay: NotRequired[Nullable[float]] - warmup_fraction: NotRequired[Nullable[float]] - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - - -class ClassifierTrainingParameters(BaseModel): - training_steps: OptionalNullable[int] = UNSET - - learning_rate: Optional[float] = 0.0001 - - weight_decay: OptionalNullable[float] = UNSET - - warmup_fraction: OptionalNullable[float] = UNSET - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertrainingparametersin.py b/src/mistralai/models/classifiertrainingparametersin.py deleted file mode 100644 index 9868843f..00000000 --- a/src/mistralai/models/classifiertrainingparametersin.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. 
During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - - -class ClassifierTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/codeinterpretertool.py b/src/mistralai/models/codeinterpretertool.py deleted file mode 100644 index 48b74ee8..00000000 --- a/src/mistralai/models/codeinterpretertool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
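# Shared defaults across the two removed hyperparameter models: only
# `learning_rate` carries a concrete default (0.0001); every other field is
# UNSET and therefore omitted from the payload unless set. Sketch:
from mistralai.models import ClassifierTrainingParametersIn

params = ClassifierTrainingParametersIn(training_steps=100)
print(params.model_dump(mode="json"))
# -> {'training_steps': 100, 'learning_rate': 0.0001}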
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CodeInterpreterToolType = Literal["code_interpreter",] - - -class CodeInterpreterToolTypedDict(TypedDict): - type: NotRequired[CodeInterpreterToolType] - - -class CodeInterpreterTool(BaseModel): - type: Optional[CodeInterpreterToolType] = "code_interpreter" diff --git a/src/mistralai/models/completionargs.py b/src/mistralai/models/completionargs.py deleted file mode 100644 index 40aa0314..00000000 --- a/src/mistralai/models/completionargs.py +++ /dev/null @@ -1,101 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .toolchoiceenum import ToolChoiceEnum -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionArgsTypedDict(TypedDict): - r"""White-listed arguments from the completion API""" - - stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] - presence_penalty: NotRequired[Nullable[float]] - frequency_penalty: NotRequired[Nullable[float]] - temperature: NotRequired[Nullable[float]] - top_p: NotRequired[Nullable[float]] - max_tokens: NotRequired[Nullable[int]] - random_seed: NotRequired[Nullable[int]] - prediction: NotRequired[Nullable[PredictionTypedDict]] - response_format: NotRequired[Nullable[ResponseFormatTypedDict]] - tool_choice: NotRequired[ToolChoiceEnum] - - -class CompletionArgs(BaseModel): - r"""White-listed arguments from the completion API""" - - stop: OptionalNullable[CompletionArgsStop] = UNSET - - presence_penalty: OptionalNullable[float] = UNSET - - frequency_penalty: OptionalNullable[float] = UNSET - - temperature: OptionalNullable[float] = UNSET - - top_p: OptionalNullable[float] = UNSET - - max_tokens: OptionalNullable[int] = UNSET - - random_seed: OptionalNullable[int] = UNSET - - prediction: OptionalNullable[Prediction] = UNSET - - response_format: OptionalNullable[ResponseFormat] = UNSET - - tool_choice: Optional[ToolChoiceEnum] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stop", - "presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - "tool_choice", - ] - nullable_fields = [ - "stop", - "presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git 
a/src/mistralai/models/completionargsstop.py b/src/mistralai/models/completionargsstop.py deleted file mode 100644 index de7a0956..00000000 --- a/src/mistralai/models/completionargsstop.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import List, Union -from typing_extensions import TypeAliasType - - -CompletionArgsStopTypedDict = TypeAliasType( - "CompletionArgsStopTypedDict", Union[str, List[str]] -) - - -CompletionArgsStop = TypeAliasType("CompletionArgsStop", Union[str, List[str]]) diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py deleted file mode 100644 index 4d1fcfbf..00000000 --- a/src/mistralai/models/completionchunk.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionChunkTypedDict(TypedDict): - id: str - model: str - choices: List[CompletionResponseStreamChoiceTypedDict] - object: NotRequired[str] - created: NotRequired[int] - usage: NotRequired[UsageInfoTypedDict] - - -class CompletionChunk(BaseModel): - id: str - - model: str - - choices: List[CompletionResponseStreamChoice] - - object: Optional[str] = None - - created: Optional[int] = None - - usage: Optional[UsageInfo] = None diff --git a/src/mistralai/models/completiondetailedjobout.py b/src/mistralai/models/completiondetailedjobout.py deleted file mode 100644 index df41bc2a..00000000 --- a/src/mistralai/models/completiondetailedjobout.py +++ /dev/null @@ -1,165 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CompletionDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] - - -CompletionDetailedJobOutObject = Literal["job",] - - -CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -CompletionDetailedJobOutIntegrations = WandbIntegrationOut - - -CompletionDetailedJobOutJobType = Literal["completion",] - - -CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict - - -CompletionDetailedJobOutRepositories = GithubRepositoryOut - - -class CompletionDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: CompletionDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: CompletionTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[CompletionDetailedJobOutObject] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] - ] - trained_tokens: NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[CompletionDetailedJobOutJobType] - repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class CompletionDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: CompletionDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: CompletionTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - - object: Optional[CompletionDetailedJobOutObject] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[CompletionDetailedJobOutJobType] = "completion" - - repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "repositories", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py deleted file mode 100644 index cc859910..00000000 --- a/src/mistralai/models/completionevent.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class CompletionEventTypedDict(TypedDict): - data: CompletionChunkTypedDict - - -class CompletionEvent(BaseModel): - data: CompletionChunk diff --git a/src/mistralai/models/completionftmodelout.py b/src/mistralai/models/completionftmodelout.py deleted file mode 100644 index 7b6520de..00000000 --- a/src/mistralai/models/completionftmodelout.py +++ /dev/null @@ -1,104 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, -) -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CompletionFTModelOutObject = Literal["model",] - - -ModelType = Literal["completion",] - - -class CompletionFTModelOutTypedDict(TypedDict): - id: str - created: int - owned_by: str - workspace_id: str - root: str - root_version: str - archived: bool - capabilities: FTModelCapabilitiesOutTypedDict - job: str - object: NotRequired[CompletionFTModelOutObject] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - model_type: NotRequired[ModelType] - - -class CompletionFTModelOut(BaseModel): - id: str - - created: int - - owned_by: str - - workspace_id: str - - root: str - - root_version: str - - archived: bool - - capabilities: FTModelCapabilitiesOut - - job: str - - object: Optional[CompletionFTModelOutObject] = "model" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - model_type: Optional[ModelType] = "completion" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - "model_type", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionjobout.py b/src/mistralai/models/completionjobout.py deleted file mode 100644 index 70995d2a..00000000 --- a/src/mistralai/models/completionjobout.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Status = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current status of the fine-tuning job.""" - - -CompletionJobOutObject = Literal["job",] -r"""The object type of the fine-tuning job.""" - - -IntegrationsTypedDict = WandbIntegrationOutTypedDict - - -Integrations = WandbIntegrationOut - - -JobType = Literal["completion",] -r"""The type of job (`FT` for fine-tuning).""" - - -RepositoriesTypedDict = GithubRepositoryOutTypedDict - - -Repositories = GithubRepositoryOut - - -class CompletionJobOutTypedDict(TypedDict): - id: str - r"""The ID of the job.""" - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: Status - r"""The current status of the fine-tuning job.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - hyperparameters: CompletionTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[CompletionJobOutObject] - r"""The object type of the fine-tuning job.""" - fine_tuned_model: NotRequired[Nullable[str]] - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[IntegrationsTypedDict]]] - r"""A list of integrations enabled for your fine-tuning job.""" - trained_tokens: NotRequired[Nullable[int]] - r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[JobType] - r"""The type of job (`FT` for fine-tuning).""" - repositories: NotRequired[List[RepositoriesTypedDict]] - - -class CompletionJobOut(BaseModel): - id: str - r"""The ID of the job.""" - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: Status - r"""The current status of the fine-tuning job.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - - hyperparameters: CompletionTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data.""" - - object: Optional[CompletionJobOutObject] = "job" - r"""The object type of the fine-tuning job.""" - - fine_tuned_model: OptionalNullable[str] = UNSET - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - integrations: OptionalNullable[List[Integrations]] = UNSET - r"""A list of integrations enabled for your fine-tuning job.""" - - trained_tokens: OptionalNullable[int] = UNSET - r"""Total number of tokens trained.""" - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[JobType] = "completion" - r"""The type of job (`FT` for fine-tuning).""" - - repositories: Optional[List[Repositories]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "repositories", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py deleted file mode 100644 index 80f63987..00000000 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr -from pydantic import model_serializer -from typing import Literal, Union -from typing_extensions import TypedDict - - -CompletionResponseStreamChoiceFinishReason = Union[ - Literal[ - "stop", - "length", - "error", - "tool_calls", - ], - UnrecognizedStr, -] - - -class CompletionResponseStreamChoiceTypedDict(TypedDict): - index: int - delta: DeltaMessageTypedDict - finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - - -class CompletionResponseStreamChoice(BaseModel): - index: int - - delta: DeltaMessage - - finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completiontrainingparameters.py b/src/mistralai/models/completiontrainingparameters.py deleted file mode 100644 index 0200e81c..00000000 --- a/src/mistralai/models/completiontrainingparameters.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionTrainingParametersTypedDict(TypedDict): - training_steps: NotRequired[Nullable[int]] - learning_rate: NotRequired[float] - weight_decay: NotRequired[Nullable[float]] - warmup_fraction: NotRequired[Nullable[float]] - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - fim_ratio: NotRequired[Nullable[float]] - - -class CompletionTrainingParameters(BaseModel): - training_steps: OptionalNullable[int] = UNSET - - learning_rate: Optional[float] = 0.0001 - - weight_decay: OptionalNullable[float] = UNSET - - warmup_fraction: OptionalNullable[float] = UNSET - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - fim_ratio: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completiontrainingparametersin.py b/src/mistralai/models/completiontrainingparametersin.py deleted file mode 100644 index 1f74bb9d..00000000 --- a/src/mistralai/models/completiontrainingparametersin.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - fim_ratio: NotRequired[Nullable[float]] - - -class CompletionTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - fim_ratio: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py deleted file mode 100644 index 47170eef..00000000 --- a/src/mistralai/models/contentchunk.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .audiochunk import AudioChunk, AudioChunkTypedDict -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .filechunk import FileChunk, FileChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -ContentChunkTypedDict = TypeAliasType( - "ContentChunkTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - ReferenceChunkTypedDict, - FileChunkTypedDict, - AudioChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ], -) - - -ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[DocumentURLChunk, Tag("document_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - Annotated[FileChunk, Tag("file")], - Annotated[ThinkChunk, Tag("thinking")], - Annotated[AudioChunk, Tag("input_audio")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] diff --git a/src/mistralai/models/conversationappendrequest.py b/src/mistralai/models/conversationappendrequest.py deleted file mode 100644 index 15cbc687..00000000 --- a/src/mistralai/models/conversationappendrequest.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationAppendRequestHandoffExecution = Literal[ - "client", - "server", -] - - -class ConversationAppendRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - - -class ConversationAppendRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = False - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationappendstreamrequest.py b/src/mistralai/models/conversationappendstreamrequest.py deleted file mode 100644 index 8cecf89d..00000000 --- a/src/mistralai/models/conversationappendstreamrequest.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationAppendStreamRequestHandoffExecution = Literal[ - "client", - "server", -] - - -class ConversationAppendStreamRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - - -class ConversationAppendStreamRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = True - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( - "server" - ) - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationevents.py b/src/mistralai/models/conversationevents.py deleted file mode 100644 index ba4c628c..00000000 --- a/src/mistralai/models/conversationevents.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict -from .agenthandoffstartedevent import ( - AgentHandoffStartedEvent, - AgentHandoffStartedEventTypedDict, -) -from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict -from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict -from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict -from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict -from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict -from .ssetypes import SSETypes -from .toolexecutiondeltaevent import ( - ToolExecutionDeltaEvent, - ToolExecutionDeltaEventTypedDict, -) -from .toolexecutiondoneevent import ( - ToolExecutionDoneEvent, - ToolExecutionDoneEventTypedDict, -) -from .toolexecutionstartedevent import ( - ToolExecutionStartedEvent, - ToolExecutionStartedEventTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -ConversationEventsDataTypedDict = TypeAliasType( - "ConversationEventsDataTypedDict", - Union[ - ResponseStartedEventTypedDict, - ResponseDoneEventTypedDict, - ResponseErrorEventTypedDict, - ToolExecutionStartedEventTypedDict, - ToolExecutionDeltaEventTypedDict, - ToolExecutionDoneEventTypedDict, - AgentHandoffStartedEventTypedDict, - AgentHandoffDoneEventTypedDict, - FunctionCallEventTypedDict, - MessageOutputEventTypedDict, - ], -) - - -ConversationEventsData = Annotated[ - Union[ - Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], - Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], - Annotated[ResponseDoneEvent, Tag("conversation.response.done")], - Annotated[ResponseErrorEvent, 
Tag("conversation.response.error")], - Annotated[ResponseStartedEvent, Tag("conversation.response.started")], - Annotated[FunctionCallEvent, Tag("function.call.delta")], - Annotated[MessageOutputEvent, Tag("message.output.delta")], - Annotated[ToolExecutionDeltaEvent, Tag("tool.execution.delta")], - Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], - Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class ConversationEventsTypedDict(TypedDict): - event: SSETypes - r"""Server side events sent when streaming a conversation response.""" - data: ConversationEventsDataTypedDict - - -class ConversationEvents(BaseModel): - event: SSETypes - r"""Server side events sent when streaming a conversation response.""" - - data: ConversationEventsData diff --git a/src/mistralai/models/conversationhistory.py b/src/mistralai/models/conversationhistory.py deleted file mode 100644 index d5206a57..00000000 --- a/src/mistralai/models/conversationhistory.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict -from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict -from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict -from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationHistoryObject = Literal["conversation.history",] - - -EntriesTypedDict = TypeAliasType( - "EntriesTypedDict", - Union[ - FunctionResultEntryTypedDict, - MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, - MessageOutputEntryTypedDict, - AgentHandoffEntryTypedDict, - ], -) - - -Entries = TypeAliasType( - "Entries", - Union[ - FunctionResultEntry, - MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, - MessageOutputEntry, - AgentHandoffEntry, - ], -) - - -class ConversationHistoryTypedDict(TypedDict): - r"""Retrieve all entries in a conversation.""" - - conversation_id: str - entries: List[EntriesTypedDict] - object: NotRequired[ConversationHistoryObject] - - -class ConversationHistory(BaseModel): - r"""Retrieve all entries in a conversation.""" - - conversation_id: str - - entries: List[Entries] - - object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/models/conversationinputs.py b/src/mistralai/models/conversationinputs.py deleted file mode 100644 index 4d30cd76..00000000 --- a/src/mistralai/models/conversationinputs.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .inputentries import InputEntries, InputEntriesTypedDict -from typing import List, Union -from typing_extensions import TypeAliasType - - -ConversationInputsTypedDict = TypeAliasType( - "ConversationInputsTypedDict", Union[str, List[InputEntriesTypedDict]] -) - - -ConversationInputs = TypeAliasType("ConversationInputs", Union[str, List[InputEntries]]) diff --git a/src/mistralai/models/conversationmessages.py b/src/mistralai/models/conversationmessages.py deleted file mode 100644 index 32ca9c20..00000000 --- a/src/mistralai/models/conversationmessages.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .messageentries import MessageEntries, MessageEntriesTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationMessagesObject = Literal["conversation.messages",] - - -class ConversationMessagesTypedDict(TypedDict): - r"""Similar to the conversation history but only keep the messages""" - - conversation_id: str - messages: List[MessageEntriesTypedDict] - object: NotRequired[ConversationMessagesObject] - - -class ConversationMessages(BaseModel): - r"""Similar to the conversation history but only keep the messages""" - - conversation_id: str - - messages: List[MessageEntries] - - object: Optional[ConversationMessagesObject] = "conversation.messages" diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py deleted file mode 100644 index 80581cc1..00000000 --- a/src/mistralai/models/conversationrequest.py +++ /dev/null @@ -1,154 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -HandoffExecution = Literal[ - "client", - "server", -] - - -ToolsTypedDict = TypeAliasType( - "ToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -Tools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -AgentVersionTypedDict = TypeAliasType("AgentVersionTypedDict", Union[str, int]) - - -AgentVersion = TypeAliasType("AgentVersion", Union[str, int]) - - -class ConversationRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[Nullable[bool]] - handoff_execution: NotRequired[Nullable[HandoffExecution]] - instructions: NotRequired[Nullable[str]] - tools: NotRequired[List[ToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[AgentVersionTypedDict]] - model: NotRequired[Nullable[str]] - - -class ConversationRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = False - - store: OptionalNullable[bool] = UNSET - - handoff_execution: OptionalNullable[HandoffExecution] = UNSET - - instructions: OptionalNullable[str] = UNSET - - tools: Optional[List[Tools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: OptionalNullable[CompletionArgs] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - agent_version: OptionalNullable[AgentVersion] = UNSET - - model: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - 
"name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationresponse.py b/src/mistralai/models/conversationresponse.py deleted file mode 100644 index ff318e35..00000000 --- a/src/mistralai/models/conversationresponse.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict -from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict -from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationResponseObject = Literal["conversation.response",] - - -OutputsTypedDict = TypeAliasType( - "OutputsTypedDict", - Union[ - ToolExecutionEntryTypedDict, - FunctionCallEntryTypedDict, - MessageOutputEntryTypedDict, - AgentHandoffEntryTypedDict, - ], -) - - -Outputs = TypeAliasType( - "Outputs", - Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], -) - - -class ConversationResponseTypedDict(TypedDict): - r"""The response after appending new entries to the conversation.""" - - conversation_id: str - outputs: List[OutputsTypedDict] - usage: ConversationUsageInfoTypedDict - object: NotRequired[ConversationResponseObject] - - -class ConversationResponse(BaseModel): - r"""The response after appending new entries to the conversation.""" - - conversation_id: str - - outputs: List[Outputs] - - usage: ConversationUsageInfo - - object: Optional[ConversationResponseObject] = "conversation.response" diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py deleted file mode 100644 index 6f21d012..00000000 --- a/src/mistralai/models/conversationrestartrequest.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationRestartRequestHandoffExecution = Literal[ - "client", - "server", -] - - -ConversationRestartRequestAgentVersionTypedDict = TypeAliasType( - "ConversationRestartRequestAgentVersionTypedDict", Union[str, int] -) -r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -ConversationRestartRequestAgentVersion = TypeAliasType( - "ConversationRestartRequestAgentVersion", Union[str, int] -) -r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -class ConversationRestartRequestTypedDict(TypedDict): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputsTypedDict - from_entry_id: str - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - r"""Custom metadata for the conversation.""" - agent_version: NotRequired[ - Nullable[ConversationRestartRequestAgentVersionTypedDict] - ] - r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -class ConversationRestartRequest(BaseModel): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputs - - from_entry_id: str - - stream: Optional[bool] = False - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - r"""Custom metadata for the conversation.""" - - agent_version: OptionalNullable[ConversationRestartRequestAgentVersion] = UNSET - r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "completion_args", - "metadata", - "agent_version", - ] - nullable_fields = ["metadata", "agent_version"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py deleted file mode 100644 index 2cec7958..00000000 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationRestartStreamRequestHandoffExecution = Literal[ - "client", - "server", -] - - -ConversationRestartStreamRequestAgentVersionTypedDict = TypeAliasType( - "ConversationRestartStreamRequestAgentVersionTypedDict", Union[str, int] -) -r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -ConversationRestartStreamRequestAgentVersion = TypeAliasType( - "ConversationRestartStreamRequestAgentVersion", Union[str, int] -) -r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -class ConversationRestartStreamRequestTypedDict(TypedDict): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputsTypedDict - from_entry_id: str - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - r"""Custom metadata for the conversation.""" - agent_version: NotRequired[ - Nullable[ConversationRestartStreamRequestAgentVersionTypedDict] - ] - r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" - - -class ConversationRestartStreamRequest(BaseModel): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputs - - from_entry_id: str - - stream: Optional[bool] = True - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( - "server" - ) - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - r"""Custom metadata for the conversation.""" - - agent_version: OptionalNullable[ConversationRestartStreamRequestAgentVersion] = ( - UNSET - ) - r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "completion_args", - "metadata", - "agent_version", - ] - nullable_fields = ["metadata", "agent_version"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py deleted file mode 100644 index 1a481b77..00000000 --- a/src/mistralai/models/conversationstreamrequest.py +++ /dev/null @@ -1,160 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ConversationStreamRequestHandoffExecution = Literal[ - "client", - "server", -] - - -ConversationStreamRequestToolsTypedDict = TypeAliasType( - "ConversationStreamRequestToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -ConversationStreamRequestTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -ConversationStreamRequestAgentVersionTypedDict = TypeAliasType( - "ConversationStreamRequestAgentVersionTypedDict", Union[str, int] -) - - -ConversationStreamRequestAgentVersion = TypeAliasType( - "ConversationStreamRequestAgentVersion", Union[str, int] -) - - -class ConversationStreamRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[Nullable[bool]] - handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] - instructions: NotRequired[Nullable[str]] - tools: NotRequired[List[ConversationStreamRequestToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[ConversationStreamRequestAgentVersionTypedDict]] - model: NotRequired[Nullable[str]] - - -class ConversationStreamRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = True - - store: OptionalNullable[bool] = UNSET - - handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( - UNSET - ) - - instructions: OptionalNullable[str] = UNSET - - tools: Optional[List[ConversationStreamRequestTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: OptionalNullable[CompletionArgs] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - metadata: OptionalNullable[Dict[str, 
Any]] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - agent_version: OptionalNullable[ConversationStreamRequestAgentVersion] = UNSET - - model: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationusageinfo.py b/src/mistralai/models/conversationusageinfo.py deleted file mode 100644 index 9ae6f4fb..00000000 --- a/src/mistralai/models/conversationusageinfo.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class ConversationUsageInfoTypedDict(TypedDict): - prompt_tokens: NotRequired[int] - completion_tokens: NotRequired[int] - total_tokens: NotRequired[int] - connector_tokens: NotRequired[Nullable[int]] - connectors: NotRequired[Nullable[Dict[str, int]]] - - -class ConversationUsageInfo(BaseModel): - prompt_tokens: Optional[int] = 0 - - completion_tokens: Optional[int] = 0 - - total_tokens: Optional[int] = 0 - - connector_tokens: OptionalNullable[int] = UNSET - - connectors: OptionalNullable[Dict[str, int]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "connector_tokens", - "connectors", - ] - nullable_fields = ["connector_tokens", "connectors"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py deleted file mode 100644 index 4acb8d53..00000000 --- a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to delete.""" - - -class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to delete.""" diff --git a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py deleted file mode 100644 index 2b346ec4..00000000 --- a/src/mistralai/models/deletefileout.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class DeleteFileOutTypedDict(TypedDict): - id: str - r"""The ID of the deleted file.""" - object: str - r"""The object type that was deleted""" - deleted: bool - r"""The deletion status.""" - - -class DeleteFileOut(BaseModel): - id: str - r"""The ID of the deleted file.""" - - object: str - r"""The object type that was deleted""" - - deleted: bool - r"""The deletion status.""" diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py deleted file mode 100644 index c1b1effc..00000000 --- a/src/mistralai/models/deletemodelout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class DeleteModelOutTypedDict(TypedDict): - id: str - r"""The ID of the deleted model.""" - object: NotRequired[str] - r"""The object type that was deleted""" - deleted: NotRequired[bool] - r"""The deletion status""" - - -class DeleteModelOut(BaseModel): - id: str - r"""The ID of the deleted model.""" - - object: Optional[str] = "model" - r"""The object type that was deleted""" - - deleted: Optional[bool] = True - r"""The deletion status""" diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py deleted file mode 100644 index 88aefe7f..00000000 --- a/src/mistralai/models/deltamessage.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
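These request/response models back the delete endpoints; a short usage sketch with placeholder ids:

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    # DeleteFileOut echoes the id with the deleted object type and status.
    file_res = client.files.delete(file_id="file_0123456789")
    print(file_res.id, file_res.deleted)

    # DeleteModelOut defaults object to "model" and deleted to True.
    model_res = client.models.delete(model_id="ft_placeholder_model_id")
    print(model_res.id, model_res.deleted)
```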
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) - - -class DeltaMessageTypedDict(TypedDict): - role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - - -class DeltaMessage(BaseModel): - role: OptionalNullable[str] = UNSET - - content: OptionalNullable[Content] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documentlibrarytool.py b/src/mistralai/models/documentlibrarytool.py deleted file mode 100644 index 8d4c122b..00000000 --- a/src/mistralai/models/documentlibrarytool.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentLibraryToolType = Literal["document_library",] - - -class DocumentLibraryToolTypedDict(TypedDict): - library_ids: List[str] - r"""Ids of the library in which to search.""" - type: NotRequired[DocumentLibraryToolType] - - -class DocumentLibraryTool(BaseModel): - library_ids: List[str] - r"""Ids of the library in which to search.""" - - type: Optional[DocumentLibraryToolType] = "document_library" diff --git a/src/mistralai/models/documentout.py b/src/mistralai/models/documentout.py deleted file mode 100644 index 81d9605f..00000000 --- a/src/mistralai/models/documentout.py +++ /dev/null @@ -1,121 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict -from typing_extensions import NotRequired, TypedDict - - -class DocumentOutTypedDict(TypedDict): - id: str - library_id: str - hash: Nullable[str] - mime_type: Nullable[str] - extension: Nullable[str] - size: Nullable[int] - name: str - created_at: datetime - processing_status: str - uploaded_by_id: Nullable[str] - uploaded_by_type: str - tokens_processing_total: int - summary: NotRequired[Nullable[str]] - last_processed_at: NotRequired[Nullable[datetime]] - number_of_pages: NotRequired[Nullable[int]] - tokens_processing_main_content: NotRequired[Nullable[int]] - tokens_processing_summary: NotRequired[Nullable[int]] - url: NotRequired[Nullable[str]] - attributes: NotRequired[Nullable[Dict[str, Any]]] - - -class DocumentOut(BaseModel): - id: str - - library_id: str - - hash: Nullable[str] - - mime_type: Nullable[str] - - extension: Nullable[str] - - size: Nullable[int] - - name: str - - created_at: datetime - - processing_status: str - - uploaded_by_id: Nullable[str] - - uploaded_by_type: str - - tokens_processing_total: int - - summary: OptionalNullable[str] = UNSET - - last_processed_at: OptionalNullable[datetime] = UNSET - - number_of_pages: OptionalNullable[int] = UNSET - - tokens_processing_main_content: OptionalNullable[int] = UNSET - - tokens_processing_summary: OptionalNullable[int] = UNSET - - url: OptionalNullable[str] = UNSET - - attributes: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "summary", - "last_processed_at", - "number_of_pages", - "tokens_processing_main_content", - "tokens_processing_summary", - "url", - "attributes", - ] - nullable_fields = [ - "hash", - "mime_type", - "extension", - "size", - "summary", - "last_processed_at", - "number_of_pages", - "uploaded_by_id", - "tokens_processing_main_content", - "tokens_processing_summary", - "url", - "attributes", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documenttextcontent.py b/src/mistralai/models/documenttextcontent.py deleted file mode 100644 index c02528c2..00000000 --- a/src/mistralai/models/documenttextcontent.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class DocumentTextContentTypedDict(TypedDict): - text: str - - -class DocumentTextContent(BaseModel): - text: str diff --git a/src/mistralai/models/documentupdatein.py b/src/mistralai/models/documentupdatein.py deleted file mode 100644 index bd89ff47..00000000 --- a/src/mistralai/models/documentupdatein.py +++ /dev/null @@ -1,65 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Dict, List, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AttributesTypedDict = TypeAliasType( - "AttributesTypedDict", - Union[ - bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] - ], -) - - -Attributes = TypeAliasType( - "Attributes", - Union[ - bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] - ], -) - - -class DocumentUpdateInTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] - - -class DocumentUpdateIn(BaseModel): - name: OptionalNullable[str] = UNSET - - attributes: OptionalNullable[Dict[str, Attributes]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "attributes"] - nullable_fields = ["name", "attributes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py deleted file mode 100644 index 6d0b1dc6..00000000 --- a/src/mistralai/models/documenturlchunk.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentURLChunkType = Literal["document_url",] - - -class DocumentURLChunkTypedDict(TypedDict): - document_url: str - document_name: NotRequired[Nullable[str]] - r"""The filename of the document""" - type: NotRequired[DocumentURLChunkType] - - -class DocumentURLChunk(BaseModel): - document_url: str - - document_name: OptionalNullable[str] = UNSET - r"""The filename of the document""" - - type: Optional[DocumentURLChunkType] = "document_url" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["document_name", "type"] - nullable_fields = ["document_name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/embeddingdtype.py b/src/mistralai/models/embeddingdtype.py deleted file mode 100644 index 26eee779..00000000 --- a/src/mistralai/models/embeddingdtype.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -EmbeddingDtype = Literal[ - "float", - "int8", - "uint8", - "binary", - "ubinary", -] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py deleted file mode 100644 index 44797bfa..00000000 --- a/src/mistralai/models/embeddingrequest.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .embeddingdtype import EmbeddingDtype -from .encodingformat import EncodingFormat -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -EmbeddingRequestInputsTypedDict = TypeAliasType( - "EmbeddingRequestInputsTypedDict", Union[str, List[str]] -) -r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" - - -EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) -r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" - - -class EmbeddingRequestTypedDict(TypedDict): - model: str - r"""The ID of the model to be used for embedding.""" - inputs: EmbeddingRequestInputsTypedDict - r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - output_dimension: NotRequired[Nullable[int]] - r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" - output_dtype: NotRequired[EmbeddingDtype] - encoding_format: NotRequired[EncodingFormat] - - -class EmbeddingRequest(BaseModel): - model: str - r"""The ID of the model to be used for embedding.""" - - inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] - r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - output_dimension: OptionalNullable[int] = UNSET - r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" - - output_dtype: Optional[EmbeddingDtype] = None - - encoding_format: Optional[EncodingFormat] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "metadata", - "output_dimension", - "output_dtype", - "encoding_format", - ] - nullable_fields = ["metadata", "output_dimension"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/models/embeddingresponse.py deleted file mode 100644 index aae6fa60..00000000 --- a/src/mistralai/models/embeddingresponse.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class EmbeddingResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - data: List[EmbeddingResponseDataTypedDict] - - -class EmbeddingResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - data: List[EmbeddingResponseData] diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py deleted file mode 100644 index 01e2765f..00000000 --- a/src/mistralai/models/embeddingresponsedata.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class EmbeddingResponseDataTypedDict(TypedDict): - object: NotRequired[str] - embedding: NotRequired[List[float]] - index: NotRequired[int] - - -class EmbeddingResponseData(BaseModel): - object: Optional[str] = None - - embedding: Optional[List[float]] = None - - index: Optional[int] = None diff --git a/src/mistralai/models/encodingformat.py b/src/mistralai/models/encodingformat.py deleted file mode 100644 index be6c1a14..00000000 --- a/src/mistralai/models/encodingformat.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -EncodingFormat = Literal[ - "float", - "base64", -] diff --git a/src/mistralai/models/entitytype.py b/src/mistralai/models/entitytype.py deleted file mode 100644 index 8d2d4bbe..00000000 --- a/src/mistralai/models/entitytype.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -EntityType = Union[ - Literal[ - "User", - "Workspace", - "Org", - ], - UnrecognizedStr, -] -r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py deleted file mode 100644 index 32819034..00000000 --- a/src/mistralai/models/eventout.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict -from typing_extensions import NotRequired, TypedDict - - -class EventOutTypedDict(TypedDict): - name: str - r"""The name of the event.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - data: NotRequired[Nullable[Dict[str, Any]]] - - -class EventOut(BaseModel): - name: str - r"""The name of the event.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - data: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["data"] - nullable_fields = ["data"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/file.py b/src/mistralai/models/file.py deleted file mode 100644 index 682d7f6e..00000000 --- a/src/mistralai/models/file.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -import io -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, MultipartFormMetadata -import pydantic -from typing import IO, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FileTypedDict(TypedDict): - file_name: str - content: Union[bytes, IO[bytes], io.BufferedReader] - content_type: NotRequired[str] - - -class File(BaseModel): - file_name: Annotated[ - str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) - ] - - content: Annotated[ - Union[bytes, IO[bytes], io.BufferedReader], - pydantic.Field(alias=""), - FieldMetadata(multipart=MultipartFormMetadata(content=True)), - ] - - content_type: Annotated[ - Optional[str], - pydantic.Field(alias="Content-Type"), - FieldMetadata(multipart=True), - ] = None diff --git a/src/mistralai/models/filechunk.py b/src/mistralai/models/filechunk.py deleted file mode 100644 index 83e60cef..00000000 --- a/src/mistralai/models/filechunk.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class FileChunkTypedDict(TypedDict): - file_id: str - type: Literal["file"] - - -class FileChunk(BaseModel): - file_id: str - - TYPE: Annotated[ - Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], - pydantic.Field(alias="type"), - ] = "file" diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py deleted file mode 100644 index b109b350..00000000 --- a/src/mistralai/models/filepurpose.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -FilePurpose = Union[ - Literal[ - "fine-tune", - "batch", - "ocr", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py deleted file mode 100644 index a84a7a8e..00000000 --- a/src/mistralai/models/files_api_routes_delete_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesDeleteFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_download_fileop.py b/src/mistralai/models/files_api_routes_download_fileop.py deleted file mode 100644 index 168a7fa6..00000000 --- a/src/mistralai/models/files_api_routes_download_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesDownloadFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_get_signed_urlop.py b/src/mistralai/models/files_api_routes_get_signed_urlop.py deleted file mode 100644 index 708d40ab..00000000 --- a/src/mistralai/models/files_api_routes_get_signed_urlop.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): - file_id: str - expiry: NotRequired[int] - r"""Number of hours before the url becomes invalid. Defaults to 24h""" - - -class FilesAPIRoutesGetSignedURLRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - expiry: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 24 - r"""Number of hours before the url becomes invalid. Defaults to 24h""" diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py deleted file mode 100644 index 84d61b9b..00000000 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - include_total: NotRequired[bool] - sample_type: NotRequired[Nullable[List[SampleType]]] - source: NotRequired[Nullable[List[Source]]] - search: NotRequired[Nullable[str]] - purpose: NotRequired[Nullable[FilePurpose]] - mimetypes: NotRequired[Nullable[List[str]]] - - -class FilesAPIRoutesListFilesRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - include_total: Annotated[ - Optional[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = True - - sample_type: Annotated[ - OptionalNullable[List[SampleType]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - source: Annotated[ - OptionalNullable[List[Source]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - search: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - purpose: Annotated[ - OptionalNullable[FilePurpose], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - mimetypes: Annotated[ - OptionalNullable[List[str]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "include_total", - "sample_type", - "source", - "search", - "purpose", - "mimetypes", - ] - nullable_fields = ["sample_type", "source", "search", "purpose", "mimetypes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - 
val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py deleted file mode 100644 index 0c2a95ef..00000000 --- a/src/mistralai/models/files_api_routes_retrieve_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesRetrieveFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py deleted file mode 100644 index aeefe842..00000000 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .filepurpose import FilePurpose -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, MultipartFormMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): - file: FileTypedDict - r"""The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - purpose: NotRequired[FilePurpose] - - -class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): - file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] - r"""The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - - purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py deleted file mode 100644 index 9a88f1bb..00000000 --- a/src/mistralai/models/fileschema.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
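The multipart docstring above shows the raw curl form; through the SDK, the same custom-filename upload looks roughly like this (paths are placeholders):

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    with open("path/to/your/file.jsonl", "rb") as fh:
        res = client.files.upload(
            file={
                # file_name plays the role of `filename=custom_name.jsonl`
                # in the curl form shown in the docstring.
                "file_name": "custom_name.jsonl",
                "content": fh,
            },
            purpose="fine-tune",
        )
    print(res.id)
```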
DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FileSchemaTypedDict(TypedDict): - id: str - r"""The unique identifier of the file.""" - object: str - r"""The object type, which is always \"file\".""" - size_bytes: int - r"""The size of the file, in bytes.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - filename: str - r"""The name of the uploaded file.""" - purpose: FilePurpose - sample_type: SampleType - source: Source - num_lines: NotRequired[Nullable[int]] - mimetype: NotRequired[Nullable[str]] - signature: NotRequired[Nullable[str]] - - -class FileSchema(BaseModel): - id: str - r"""The unique identifier of the file.""" - - object: str - r"""The object type, which is always \"file\".""" - - size_bytes: Annotated[int, pydantic.Field(alias="bytes")] - r"""The size of the file, in bytes.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - filename: str - r"""The name of the uploaded file.""" - - purpose: FilePurpose - - sample_type: SampleType - - source: Source - - num_lines: OptionalNullable[int] = UNSET - - mimetype: OptionalNullable[str] = UNSET - - signature: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/filesignedurl.py b/src/mistralai/models/filesignedurl.py deleted file mode 100644 index 092be7f8..00000000 --- a/src/mistralai/models/filesignedurl.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class FileSignedURLTypedDict(TypedDict): - url: str - - -class FileSignedURL(BaseModel): - url: str diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py deleted file mode 100644 index 801a358b..00000000 --- a/src/mistralai/models/fimcompletionrequest.py +++ /dev/null @@ -1,124 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -FIMCompletionRequestStopTypedDict = TypeAliasType( - "FIMCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionRequestStop = TypeAliasType( - "FIMCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -class FIMCompletionRequestTypedDict(TypedDict): - model: str - r"""ID of the model with FIM to use.""" - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[FIMCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" - - -class FIMCompletionRequest(BaseModel): - model: str - r"""ID of the model with FIM to use.""" - - prompt: str - r"""The text/code to complete.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[FIMCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py deleted file mode 100644 index f27972b9..00000000 --- a/src/mistralai/models/fimcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
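In SDK terms this request maps onto `fim.complete`; a sketch (model name illustrative) asking the model to fill in a function body between `prompt` and `suffix`:

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    res = client.fim.complete(
        model="codestral-latest",  # a FIM-capable model
        prompt="def fibonacci(n: int) -> int:\n",
        suffix="\nprint(fibonacci(10))",  # model fills between prompt and suffix
        temperature=0.2,
        max_tokens=128,
    )
    print(res.choices[0].message.content)
```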
DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class FIMCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: int - choices: List[ChatCompletionChoiceTypedDict] - - -class FIMCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: int - - choices: List[ChatCompletionChoice] diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py deleted file mode 100644 index 2e8e6db2..00000000 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ /dev/null @@ -1,122 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -FIMCompletionStreamRequestStopTypedDict = TypeAliasType( - "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionStreamRequestStop = TypeAliasType( - "FIMCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -class FIMCompletionStreamRequestTypedDict(TypedDict): - model: str - r"""ID of the model with FIM to use.""" - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" - - -class FIMCompletionStreamRequest(BaseModel): - model: str - r"""ID of the model with FIM to use.""" - - prompt: str - r"""The text/code to complete.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[FIMCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/finetuneablemodeltype.py b/src/mistralai/models/finetuneablemodeltype.py deleted file mode 100644 index f5b8b2ed..00000000 --- a/src/mistralai/models/finetuneablemodeltype.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
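The streaming variant defaults `stream` to `True` and otherwise mirrors the request above; a sketch consuming the event stream, with the same placeholder caveats:

```python
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    res = client.fim.stream(
        model="codestral-latest",
        prompt="def is_prime(n: int) -> bool:\n",
        suffix="\nprint(is_prime(7))",
    )
    with res as event_stream:
        for event in event_stream:
            # Each delta carries a partial completion chunk.
            delta = event.data.choices[0].delta
            if isinstance(delta.content, str):
                print(delta.content, end="", flush=True)
```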
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -FineTuneableModelType = Literal[ - "completion", - "classifier", -] diff --git a/src/mistralai/models/ftclassifierlossfunction.py b/src/mistralai/models/ftclassifierlossfunction.py deleted file mode 100644 index c4ef66e0..00000000 --- a/src/mistralai/models/ftclassifierlossfunction.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -FTClassifierLossFunction = Literal[ - "single_class", - "multi_class", -] diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py deleted file mode 100644 index 7f3aa18b..00000000 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class FTModelCapabilitiesOutTypedDict(TypedDict): - completion_chat: NotRequired[bool] - completion_fim: NotRequired[bool] - function_calling: NotRequired[bool] - fine_tuning: NotRequired[bool] - classification: NotRequired[bool] - - -class FTModelCapabilitiesOut(BaseModel): - completion_chat: Optional[bool] = True - - completion_fim: Optional[bool] = False - - function_calling: Optional[bool] = False - - fine_tuning: Optional[bool] = False - - classification: Optional[bool] = False diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py deleted file mode 100644 index 1c3bd04d..00000000 --- a/src/mistralai/models/ftmodelcard.py +++ /dev/null @@ -1,126 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -FTModelCardType = Literal["fine-tuned",] - - -class FTModelCardTypedDict(TypedDict): - r"""Extra fields for fine-tuned models.""" - - id: str - capabilities: ModelCapabilitiesTypedDict - job: str - root: str - object: NotRequired[str] - created: NotRequired[int] - owned_by: NotRequired[str] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - deprecation: NotRequired[Nullable[datetime]] - deprecation_replacement_model: NotRequired[Nullable[str]] - default_model_temperature: NotRequired[Nullable[float]] - type: FTModelCardType - archived: NotRequired[bool] - - -class FTModelCard(BaseModel): - r"""Extra fields for fine-tuned models.""" - - id: str - - capabilities: ModelCapabilities - - job: str - - root: str - - object: Optional[str] = "model" - - created: Optional[int] = None - - owned_by: Optional[str] = "mistralai" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - deprecation: OptionalNullable[datetime] = UNSET - - deprecation_replacement_model: OptionalNullable[str] = UNSET - - default_model_temperature: OptionalNullable[float] = UNSET - - TYPE: Annotated[ - Annotated[ - Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) - ], - pydantic.Field(alias="type"), - ] = "fine-tuned" - - archived: Optional[bool] = False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - "type", - "archived", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py deleted file mode 100644 index 7d40cf75..00000000 --- a/src/mistralai/models/function.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Any, Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class FunctionTypedDict(TypedDict): - name: str - parameters: Dict[str, Any] - description: NotRequired[str] - strict: NotRequired[bool] - - -class Function(BaseModel): - name: str - - parameters: Dict[str, Any] - - description: Optional[str] = None - - strict: Optional[bool] = None diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py deleted file mode 100644 index 0cce622a..00000000 --- a/src/mistralai/models/functioncall.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType, TypedDict - - -ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) - - -Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) - - -class FunctionCallTypedDict(TypedDict): - name: str - arguments: ArgumentsTypedDict - - -class FunctionCall(BaseModel): - name: str - - arguments: Arguments diff --git a/src/mistralai/models/functioncallentry.py b/src/mistralai/models/functioncallentry.py deleted file mode 100644 index 4ea62c4f..00000000 --- a/src/mistralai/models/functioncallentry.py +++ /dev/null @@ -1,77 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .functioncallentryarguments import ( - FunctionCallEntryArguments, - FunctionCallEntryArgumentsTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionCallEntryObject = Literal["entry",] - - -FunctionCallEntryType = Literal["function.call",] - - -class FunctionCallEntryTypedDict(TypedDict): - tool_call_id: str - name: str - arguments: FunctionCallEntryArgumentsTypedDict - object: NotRequired[FunctionCallEntryObject] - type: NotRequired[FunctionCallEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - - -class FunctionCallEntry(BaseModel): - tool_call_id: str - - name: str - - arguments: FunctionCallEntryArguments - - object: Optional[FunctionCallEntryObject] = "entry" - - type: Optional[FunctionCallEntryType] = "function.call" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable 
and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/functioncallentryarguments.py b/src/mistralai/models/functioncallentryarguments.py deleted file mode 100644 index ac9e6227..00000000 --- a/src/mistralai/models/functioncallentryarguments.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType - - -FunctionCallEntryArgumentsTypedDict = TypeAliasType( - "FunctionCallEntryArgumentsTypedDict", Union[Dict[str, Any], str] -) - - -FunctionCallEntryArguments = TypeAliasType( - "FunctionCallEntryArguments", Union[Dict[str, Any], str] -) diff --git a/src/mistralai/models/functioncallevent.py b/src/mistralai/models/functioncallevent.py deleted file mode 100644 index e3992cf1..00000000 --- a/src/mistralai/models/functioncallevent.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionCallEventType = Literal["function.call.delta",] - - -class FunctionCallEventTypedDict(TypedDict): - id: str - name: str - tool_call_id: str - arguments: str - type: NotRequired[FunctionCallEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class FunctionCallEvent(BaseModel): - id: str - - name: str - - tool_call_id: str - - arguments: str - - type: Optional[FunctionCallEventType] = "function.call.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/functionname.py b/src/mistralai/models/functionname.py deleted file mode 100644 index 0a6c0b14..00000000 --- a/src/mistralai/models/functionname.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class FunctionNameTypedDict(TypedDict): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str - - -class FunctionName(BaseModel): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str diff --git a/src/mistralai/models/functionresultentry.py b/src/mistralai/models/functionresultentry.py deleted file mode 100644 index 1c61395a..00000000 --- a/src/mistralai/models/functionresultentry.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionResultEntryObject = Literal["entry",] - - -FunctionResultEntryType = Literal["function.result",] - - -class FunctionResultEntryTypedDict(TypedDict): - tool_call_id: str - result: str - object: NotRequired[FunctionResultEntryObject] - type: NotRequired[FunctionResultEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - - -class FunctionResultEntry(BaseModel): - tool_call_id: str - - result: str - - object: Optional[FunctionResultEntryObject] = "entry" - - type: Optional[FunctionResultEntryType] = "function.result" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/functiontool.py b/src/mistralai/models/functiontool.py deleted file mode 100644 index 009fe280..00000000 --- a/src/mistralai/models/functiontool.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .function import Function, FunctionTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionToolType = Literal["function",] - - -class FunctionToolTypedDict(TypedDict): - function: FunctionTypedDict - type: NotRequired[FunctionToolType] - - -class FunctionTool(BaseModel): - function: Function - - type: Optional[FunctionToolType] = "function" diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py deleted file mode 100644 index b16ce0d2..00000000 --- a/src/mistralai/models/githubrepositoryin.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryInType = Literal["github",] - - -class GithubRepositoryInTypedDict(TypedDict): - name: str - owner: str - token: str - type: NotRequired[GithubRepositoryInType] - ref: NotRequired[Nullable[str]] - weight: NotRequired[float] - - -class GithubRepositoryIn(BaseModel): - name: str - - owner: str - - token: str - - type: Optional[GithubRepositoryInType] = "github" - - ref: OptionalNullable[str] = UNSET - - weight: Optional[float] = 1 - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py deleted file mode 100644 index 372477c1..00000000 --- a/src/mistralai/models/githubrepositoryout.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryOutType = Literal["github",] - - -class GithubRepositoryOutTypedDict(TypedDict): - name: str - owner: str - commit_id: str - type: NotRequired[GithubRepositoryOutType] - ref: NotRequired[Nullable[str]] - weight: NotRequired[float] - - -class GithubRepositoryOut(BaseModel): - name: str - - owner: str - - commit_id: str - - type: Optional[GithubRepositoryOutType] = "github" - - ref: OptionalNullable[str] = UNSET - - weight: Optional[float] = 1 - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py deleted file mode 100644 index d467577a..00000000 --- a/src/mistralai/models/httpvalidationerror.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .validationerror import ValidationError -from dataclasses import dataclass, field -import httpx -from mistralai.models import MistralError -from mistralai.types import BaseModel -from typing import List, Optional - - -class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None - - -@dataclass(unsafe_hash=True) -class HTTPValidationError(MistralError): - data: HTTPValidationErrorData = field(hash=False) - - def __init__( - self, - data: HTTPValidationErrorData, - raw_response: httpx.Response, - body: Optional[str] = None, - ): - message = body or raw_response.text - super().__init__(message, raw_response, body) - object.__setattr__(self, "data", data) diff --git a/src/mistralai/models/imagegenerationtool.py b/src/mistralai/models/imagegenerationtool.py deleted file mode 100644 index a92335db..00000000 --- a/src/mistralai/models/imagegenerationtool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ImageGenerationToolType = Literal["image_generation",] - - -class ImageGenerationToolTypedDict(TypedDict): - type: NotRequired[ImageGenerationToolType] - - -class ImageGenerationTool(BaseModel): - type: Optional[ImageGenerationToolType] = "image_generation" diff --git a/src/mistralai/models/imageurl.py b/src/mistralai/models/imageurl.py deleted file mode 100644 index 6f077b69..00000000 --- a/src/mistralai/models/imageurl.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class ImageURLTypedDict(TypedDict): - url: str - detail: NotRequired[Nullable[str]] - - -class ImageURL(BaseModel): - url: str - - detail: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py deleted file mode 100644 index 8e8aac42..00000000 --- a/src/mistralai/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url",] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py deleted file mode 100644 index 8ae29837..00000000 --- a/src/mistralai/models/inputentries.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict -from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict -from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict -from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -InputEntriesTypedDict = TypeAliasType( - "InputEntriesTypedDict", - Union[ - FunctionResultEntryTypedDict, - MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, - MessageOutputEntryTypedDict, - AgentHandoffEntryTypedDict, - ], -) - - -InputEntries = TypeAliasType( - "InputEntries", - Union[ - FunctionResultEntry, - MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, - MessageOutputEntry, - AgentHandoffEntry, - ], -) diff --git a/src/mistralai/models/inputs.py b/src/mistralai/models/inputs.py deleted file mode 100644 index 34d20f34..00000000 --- a/src/mistralai/models/inputs.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .instructrequest import InstructRequest, InstructRequestTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -InstructRequestInputsMessagesTypedDict = TypeAliasType( - "InstructRequestInputsMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -InstructRequestInputsMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -class InstructRequestInputsTypedDict(TypedDict): - messages: List[InstructRequestInputsMessagesTypedDict] - - -class InstructRequestInputs(BaseModel): - messages: List[InstructRequestInputsMessages] - - -InputsTypedDict = TypeAliasType( - "InputsTypedDict", - Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], -) -r"""Chat to classify""" - - -Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) -r"""Chat to classify""" diff --git a/src/mistralai/models/instructrequest.py b/src/mistralai/models/instructrequest.py deleted file mode 100644 index dddbda00..00000000 --- a/src/mistralai/models/instructrequest.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -InstructRequestMessagesTypedDict = TypeAliasType( - "InstructRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -InstructRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -class InstructRequestTypedDict(TypedDict): - messages: List[InstructRequestMessagesTypedDict] - - -class InstructRequest(BaseModel): - messages: List[InstructRequestMessages] diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py deleted file mode 100644 index aa0cd06c..00000000 --- a/src/mistralai/models/jobin.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict -from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, -) -from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, -) -from .finetuneablemodeltype import FineTuneableModelType -from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict -from .trainingfile import TrainingFile, TrainingFileTypedDict -from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -JobInIntegrationsTypedDict = WandbIntegrationTypedDict - - -JobInIntegrations = WandbIntegration - - -HyperparametersTypedDict = TypeAliasType( - "HyperparametersTypedDict", - Union[ - ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict - ], -) - - -Hyperparameters = TypeAliasType( - "Hyperparameters", - Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], -) - - -JobInRepositoriesTypedDict = GithubRepositoryInTypedDict - - -JobInRepositories = GithubRepositoryIn - - -class JobInTypedDict(TypedDict): - model: str - r"""The name of the model to fine-tune.""" - hyperparameters: HyperparametersTypedDict - training_files: NotRequired[List[TrainingFileTypedDict]] - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. 
diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py deleted file mode 100644 index aa0cd06c..00000000 --- a/src/mistralai/models/jobin.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict -from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, -) -from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, -) -from .finetuneablemodeltype import FineTuneableModelType -from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict -from .trainingfile import TrainingFile, TrainingFileTypedDict -from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -JobInIntegrationsTypedDict = WandbIntegrationTypedDict - - -JobInIntegrations = WandbIntegration - - -HyperparametersTypedDict = TypeAliasType( - "HyperparametersTypedDict", - Union[ - ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict - ], -) - - -Hyperparameters = TypeAliasType( - "Hyperparameters", - Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], -) - - -JobInRepositoriesTypedDict = GithubRepositoryInTypedDict - - -JobInRepositories = GithubRepositoryIn - - -class JobInTypedDict(TypedDict): - model: str - r"""The name of the model to fine-tune.""" - hyperparameters: HyperparametersTypedDict - training_files: NotRequired[List[TrainingFileTypedDict]] - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" - suffix: NotRequired[Nullable[str]] - r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] - r"""A list of integrations to enable for your fine-tuning job.""" - auto_start: NotRequired[bool] - r"""This field will be required in a future release.""" - invalid_sample_skip_percentage: NotRequired[float] - job_type: NotRequired[Nullable[FineTuneableModelType]] - repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] - classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] - - -class JobIn(BaseModel): - model: str - r"""The name of the model to fine-tune.""" - - hyperparameters: Hyperparameters - - training_files: Optional[List[TrainingFile]] = None - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" - - suffix: OptionalNullable[str] = UNSET - r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - - integrations: OptionalNullable[List[JobInIntegrations]] = UNSET - r"""A list of integrations to enable for your fine-tuning job.""" - - auto_start: Optional[bool] = None - r"""This field will be required in a future release.""" - - invalid_sample_skip_percentage: Optional[float] = 0 - - job_type: OptionalNullable[FineTuneableModelType] = UNSET - - repositories: OptionalNullable[List[JobInRepositories]] = UNSET - - classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_files", - "validation_files", - "suffix", - "integrations", - "auto_start", - "invalid_sample_skip_percentage", - "job_type", - "repositories", - "classifier_targets", - ] - nullable_fields = [ - "validation_files", - "suffix", - "integrations", - "job_type", - "repositories", - "classifier_targets", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py deleted file mode 100644 index 10ef781e..00000000 --- a/src/mistralai/models/jobmetadataout.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class JobMetadataOutTypedDict(TypedDict): - expected_duration_seconds: NotRequired[Nullable[int]] - cost: NotRequired[Nullable[float]] - cost_currency: NotRequired[Nullable[str]] - train_tokens_per_step: NotRequired[Nullable[int]] - train_tokens: NotRequired[Nullable[int]] - data_tokens: NotRequired[Nullable[int]] - estimated_start_time: NotRequired[Nullable[int]] - - -class JobMetadataOut(BaseModel): - expected_duration_seconds: OptionalNullable[int] = UNSET - - cost: OptionalNullable[float] = UNSET - - cost_currency: OptionalNullable[str] = UNSET - - train_tokens_per_step: OptionalNullable[int] = UNSET - - train_tokens: OptionalNullable[int] = UNSET - - data_tokens: OptionalNullable[int] = UNSET - - estimated_start_time: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py deleted file mode 100644 index 5b83d534..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): - job_id: str - - -class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py deleted file mode 100644 index 9bfaf9c5..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): - job_id: str - inline: NotRequired[Nullable[bool]] - - -class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - inline: Annotated[ - OptionalNullable[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["inline"] - nullable_fields = ["inline"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py deleted file mode 100644 index c48246d5..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .batchjobstatus import BatchJobStatus -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Any, Dict, List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - created_after: NotRequired[Nullable[datetime]] - created_by_me: NotRequired[bool] - status: NotRequired[Nullable[List[BatchJobStatus]]] - - -class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - model: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - agent_id: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - metadata: Annotated[ - OptionalNullable[Dict[str, Any]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_after: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_by_me: Annotated[ - Optional[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = False - - status: Annotated[ - OptionalNullable[List[BatchJobStatus]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "agent_id", - "metadata", - "created_after", - "created_by_me", - "status", - ] - nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py deleted file mode 100644 index d728efd1..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to archive.""" - - -class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to archive.""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py deleted file mode 100644 index ceb19a69..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to cancel.""" - - -class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to cancel.""" - - -JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py deleted file mode 100644 index 39af3ea6..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -Response1TypedDict = TypeAliasType( - "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -Response1 = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] - - -JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, Response1], -) -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py deleted file mode 100644 index be99dd2d..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to analyse.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to analyse.""" - - -JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py deleted file mode 100644 index 9aec8eb2..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ /dev/null @@ -1,156 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -QueryParamStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current job state to filter on. When set, the other results are not displayed.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): - page: NotRequired[int] - r"""The page number of the results to be returned.""" - page_size: NotRequired[int] - r"""The number of items to return per page.""" - model: NotRequired[Nullable[str]] - r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" - created_after: NotRequired[Nullable[datetime]] - r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" - created_before: NotRequired[Nullable[datetime]] - created_by_me: NotRequired[bool] - r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: NotRequired[Nullable[QueryParamStatus]] - r"""The current job state to filter on. When set, the other results are not displayed.""" - wandb_project: NotRequired[Nullable[str]] - r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" - wandb_name: NotRequired[Nullable[str]] - r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" - suffix: NotRequired[Nullable[str]] - r"""The model suffix to filter on. When set, the other results are not displayed.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - r"""The page number of the results to be returned.""" - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - r"""The number of items to return per page.""" - - model: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" - - created_after: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" - - created_before: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_by_me: Annotated[ - Optional[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = False - r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - - status: Annotated[ - OptionalNullable[QueryParamStatus], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The current job state to filter on. 
When set, the other results are not displayed.""" - - wandb_project: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" - - wandb_name: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" - - suffix: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The model suffix to filter on. When set, the other results are not displayed.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "created_after", - "created_before", - "created_by_me", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - nullable_fields = [ - "model", - "created_after", - "created_before", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py deleted file mode 100644 index 8103b67b..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
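For context, this query-parameter model backs the fine-tuning jobs listing. Assuming the public surface remains `client.fine_tuning.jobs.list` (the method path and the `Mistral` client shape here are taken from the current SDK docs and should be treated as an assumption, not something this patch guarantees), usage looks roughly like:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Filters mirror the request model above; when a filter is set,
# non-matching jobs are simply not returned.
jobs = client.fine_tuning.jobs.list(
    page=0,
    page_size=20,
    status="RUNNING",  # one of the QueryParamStatus literals
    created_by_me=True,
)
print(jobs.total)
```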
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): - job_id: str - - -class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - -JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py deleted file mode 100644 index a84274ff..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to unarchive.""" - - -class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to unarchive.""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py deleted file mode 100644 index a10528ca..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict -from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict -from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import ( - FieldMetadata, - PathParamMetadata, - RequestMetadata, - get_discriminator, -) -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to update.""" - update_ft_model_in: UpdateFTModelInTypedDict - - -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to update.""" - - update_ft_model_in: Annotated[ - UpdateFTModelIn, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] - - -JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", - Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ - Union[ - Annotated[ClassifierFTModelOut, Tag("classifier")], - Annotated[CompletionFTModelOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py deleted file mode 100644 index 680b1d58..00000000 --- a/src/mistralai/models/jobsout.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -JobsOutDataTypedDict = TypeAliasType( - "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -JobsOutData = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] - - -JobsOutObject = Literal["list",] - - -class JobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[JobsOutDataTypedDict]] - object: NotRequired[JobsOutObject] - - -class JobsOut(BaseModel): - total: int - - data: Optional[List[JobsOutData]] = None - - object: Optional[JobsOutObject] = "list" diff --git a/src/mistralai/models/jsonschema.py b/src/mistralai/models/jsonschema.py deleted file mode 100644 index e2b6a45e..00000000 --- a/src/mistralai/models/jsonschema.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py deleted file mode 100644 index 49951219..00000000 --- a/src/mistralai/models/legacyjobmetadataout.py +++ /dev/null @@ -1,119 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -LegacyJobMetadataOutObject = Literal["job.metadata",] - - -class LegacyJobMetadataOutTypedDict(TypedDict): - details: str - expected_duration_seconds: NotRequired[Nullable[int]] - r"""The approximated time (in seconds) for the fine-tuning process to complete.""" - cost: NotRequired[Nullable[float]] - r"""The cost of the fine-tuning job.""" - cost_currency: NotRequired[Nullable[str]] - r"""The currency used for the fine-tuning job cost.""" - train_tokens_per_step: NotRequired[Nullable[int]] - r"""The number of tokens consumed by one training step.""" - train_tokens: NotRequired[Nullable[int]] - r"""The total number of tokens used during the fine-tuning process.""" - data_tokens: NotRequired[Nullable[int]] - r"""The total number of tokens in the training dataset.""" - estimated_start_time: NotRequired[Nullable[int]] - deprecated: NotRequired[bool] - epochs: NotRequired[Nullable[float]] - r"""The number of complete passes through the entire training dataset.""" - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - object: NotRequired[LegacyJobMetadataOutObject] - - -class LegacyJobMetadataOut(BaseModel): - details: str - - expected_duration_seconds: OptionalNullable[int] = UNSET - r"""The approximated time (in seconds) for the fine-tuning process to complete.""" - - cost: OptionalNullable[float] = UNSET - r"""The cost of the fine-tuning job.""" - - cost_currency: OptionalNullable[str] = UNSET - r"""The currency used for the fine-tuning job cost.""" - - train_tokens_per_step: OptionalNullable[int] = UNSET - r"""The number of tokens consumed by one training step.""" - - train_tokens: OptionalNullable[int] = UNSET - r"""The total number of tokens used during the fine-tuning process.""" - - data_tokens: OptionalNullable[int] = UNSET - r"""The total number of tokens in the training dataset.""" - - estimated_start_time: OptionalNullable[int] = UNSET - - deprecated: Optional[bool] = True - - epochs: OptionalNullable[float] = UNSET - r"""The number of complete passes through the entire training dataset.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - object: Optional[LegacyJobMetadataOutObject] = "job.metadata" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "deprecated", - "epochs", - "training_steps", - "object", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "epochs", - "training_steps", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraries_delete_v1op.py b/src/mistralai/models/libraries_delete_v1op.py deleted file mode 100644 index 56f8f8a8..00000000 --- a/src/mistralai/models/libraries_delete_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDeleteV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesDeleteV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_delete_v1op.py b/src/mistralai/models/libraries_documents_delete_v1op.py deleted file mode 100644 index c33710b0..00000000 --- a/src/mistralai/models/libraries_documents_delete_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsDeleteV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py deleted file mode 100644 index e2459c1c..00000000 --- a/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/models/libraries_documents_get_signed_url_v1op.py deleted file mode 100644 index bc913ba5..00000000 --- a/src/mistralai/models/libraries_documents_get_signed_url_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetSignedURLV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_status_v1op.py b/src/mistralai/models/libraries_documents_get_status_v1op.py deleted file mode 100644 index 08992d7c..00000000 --- a/src/mistralai/models/libraries_documents_get_status_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetStatusV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/models/libraries_documents_get_text_content_v1op.py deleted file mode 100644 index 21a131ad..00000000 --- a/src/mistralai/models/libraries_documents_get_text_content_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetTextContentV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_v1op.py b/src/mistralai/models/libraries_documents_get_v1op.py deleted file mode 100644 index ff2bdedb..00000000 --- a/src/mistralai/models/libraries_documents_get_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_list_v1op.py b/src/mistralai/models/libraries_documents_list_v1op.py deleted file mode 100644 index e6ff29cf..00000000 --- a/src/mistralai/models/libraries_documents_list_v1op.py +++ /dev/null @@ -1,91 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class LibrariesDocumentsListV1RequestTypedDict(TypedDict): - library_id: str - search: NotRequired[Nullable[str]] - page_size: NotRequired[int] - page: NotRequired[int] - filters_attributes: NotRequired[Nullable[str]] - sort_by: NotRequired[str] - sort_order: NotRequired[str] - - -class LibrariesDocumentsListV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - search: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - filters_attributes: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - sort_by: Annotated[ - Optional[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = "created_at" - - sort_order: Annotated[ - Optional[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = "desc" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "search", - "page_size", - "page", - "filters_attributes", - "sort_by", - "sort_order", - ] - nullable_fields = ["search", "filters_attributes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraries_documents_reprocess_v1op.py b/src/mistralai/models/libraries_documents_reprocess_v1op.py deleted file mode 100644 index 861993e7..00000000 --- 
a/src/mistralai/models/libraries_documents_reprocess_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsReprocessV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_update_v1op.py b/src/mistralai/models/libraries_documents_update_v1op.py deleted file mode 100644 index 5551d5ee..00000000 --- a/src/mistralai/models/libraries_documents_update_v1op.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - document_update_in: DocumentUpdateInTypedDict - - -class LibrariesDocumentsUpdateV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_update_in: Annotated[ - DocumentUpdateIn, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/libraries_documents_upload_v1op.py b/src/mistralai/models/libraries_documents_upload_v1op.py deleted file mode 100644 index 51f536cc..00000000 --- a/src/mistralai/models/libraries_documents_upload_v1op.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from mistralai.types import BaseModel -from mistralai.utils import ( - FieldMetadata, - MultipartFormMetadata, - PathParamMetadata, - RequestMetadata, -) -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): - file: FileTypedDict - r"""The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - - -class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): - file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] - r"""The File object (not file name) to be uploaded. 
- To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - - -class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): - library_id: str - request_body: LibrariesDocumentsUploadV1DocumentUploadTypedDict - - -class LibrariesDocumentsUploadV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - request_body: Annotated[ - LibrariesDocumentsUploadV1DocumentUpload, - FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), - ] diff --git a/src/mistralai/models/libraries_get_v1op.py b/src/mistralai/models/libraries_get_v1op.py deleted file mode 100644 index b87090f6..00000000 --- a/src/mistralai/models/libraries_get_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesGetV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesGetV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_share_create_v1op.py b/src/mistralai/models/libraries_share_create_v1op.py deleted file mode 100644 index a8b0e35d..00000000 --- a/src/mistralai/models/libraries_share_create_v1op.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .sharingin import SharingIn, SharingInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesShareCreateV1RequestTypedDict(TypedDict): - library_id: str - sharing_in: SharingInTypedDict - - -class LibrariesShareCreateV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - sharing_in: Annotated[ - SharingIn, FieldMetadata(request=RequestMetadata(media_type="application/json")) - ] diff --git a/src/mistralai/models/libraries_share_delete_v1op.py b/src/mistralai/models/libraries_share_delete_v1op.py deleted file mode 100644 index e29d556a..00000000 --- a/src/mistralai/models/libraries_share_delete_v1op.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .sharingdelete import SharingDelete, SharingDeleteTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesShareDeleteV1RequestTypedDict(TypedDict): - library_id: str - sharing_delete: SharingDeleteTypedDict - - -class LibrariesShareDeleteV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - sharing_delete: Annotated[ - SharingDelete, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/libraries_share_list_v1op.py b/src/mistralai/models/libraries_share_list_v1op.py deleted file mode 100644 index b276d756..00000000 --- a/src/mistralai/models/libraries_share_list_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesShareListV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesShareListV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_update_v1op.py b/src/mistralai/models/libraries_update_v1op.py deleted file mode 100644 index c93895d9..00000000 --- a/src/mistralai/models/libraries_update_v1op.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesUpdateV1RequestTypedDict(TypedDict): - library_id: str - library_in_update: LibraryInUpdateTypedDict - - -class LibrariesUpdateV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - library_in_update: Annotated[ - LibraryInUpdate, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/libraryin.py b/src/mistralai/models/libraryin.py deleted file mode 100644 index 872d494d..00000000 --- a/src/mistralai/models/libraryin.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryInTypedDict(TypedDict): - name: str - description: NotRequired[Nullable[str]] - chunk_size: NotRequired[Nullable[int]] - - -class LibraryIn(BaseModel): - name: str - - description: OptionalNullable[str] = UNSET - - chunk_size: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "chunk_size"] - nullable_fields = ["description", "chunk_size"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraryinupdate.py b/src/mistralai/models/libraryinupdate.py deleted file mode 100644 index 6e8ab81a..00000000 --- a/src/mistralai/models/libraryinupdate.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryInUpdateTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class LibraryInUpdate(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraryout.py b/src/mistralai/models/libraryout.py deleted file mode 100644 index d3bc36f9..00000000 --- a/src/mistralai/models/libraryout.py +++ /dev/null @@ -1,110 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryOutTypedDict(TypedDict): - id: str - name: str - created_at: datetime - updated_at: datetime - owner_id: Nullable[str] - owner_type: str - total_size: int - nb_documents: int - chunk_size: Nullable[int] - emoji: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - generated_description: NotRequired[Nullable[str]] - explicit_user_members_count: NotRequired[Nullable[int]] - explicit_workspace_members_count: NotRequired[Nullable[int]] - org_sharing_role: NotRequired[Nullable[str]] - generated_name: NotRequired[Nullable[str]] - r"""Generated Name""" - - -class LibraryOut(BaseModel): - id: str - - name: str - - created_at: datetime - - updated_at: datetime - - owner_id: Nullable[str] - - owner_type: str - - total_size: int - - nb_documents: int - - chunk_size: Nullable[int] - - emoji: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - generated_description: OptionalNullable[str] = UNSET - - explicit_user_members_count: OptionalNullable[int] = UNSET - - explicit_workspace_members_count: OptionalNullable[int] = UNSET - - org_sharing_role: OptionalNullable[str] = UNSET - - generated_name: OptionalNullable[str] = UNSET - r"""Generated Name""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "emoji", - "description", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - "generated_name", - ] - nullable_fields = [ - "owner_id", - "chunk_size", - "emoji", - "description", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - "generated_name", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/listdocumentout.py b/src/mistralai/models/listdocumentout.py deleted file mode 100644 index 9d39e087..00000000 --- a/src/mistralai/models/listdocumentout.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .documentout import DocumentOut, DocumentOutTypedDict -from .paginationinfo import PaginationInfo, PaginationInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListDocumentOutTypedDict(TypedDict): - pagination: PaginationInfoTypedDict - data: List[DocumentOutTypedDict] - - -class ListDocumentOut(BaseModel): - pagination: PaginationInfo - - data: List[DocumentOut] diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py deleted file mode 100644 index 2f82b37d..00000000 --- a/src/mistralai/models/listfilesout.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .fileschema import FileSchema, FileSchemaTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List -from typing_extensions import NotRequired, TypedDict - - -class ListFilesOutTypedDict(TypedDict): - data: List[FileSchemaTypedDict] - object: str - total: NotRequired[Nullable[int]] - - -class ListFilesOut(BaseModel): - data: List[FileSchema] - - object: str - - total: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["total"] - nullable_fields = ["total"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/listlibraryout.py b/src/mistralai/models/listlibraryout.py deleted file mode 100644 index 1e647fe1..00000000 --- a/src/mistralai/models/listlibraryout.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .libraryout import LibraryOut, LibraryOutTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListLibraryOutTypedDict(TypedDict): - data: List[LibraryOutTypedDict] - - -class ListLibraryOut(BaseModel): - data: List[LibraryOut] diff --git a/src/mistralai/models/listsharingout.py b/src/mistralai/models/listsharingout.py deleted file mode 100644 index 38c0dbe0..00000000 --- a/src/mistralai/models/listsharingout.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .sharingout import SharingOut, SharingOutTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListSharingOutTypedDict(TypedDict): - data: List[SharingOutTypedDict] - - -class ListSharingOut(BaseModel): - data: List[SharingOut] diff --git a/src/mistralai/models/messageentries.py b/src/mistralai/models/messageentries.py deleted file mode 100644 index 9b1706de..00000000 --- a/src/mistralai/models/messageentries.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -MessageEntriesTypedDict = TypeAliasType( - "MessageEntriesTypedDict", - Union[MessageInputEntryTypedDict, MessageOutputEntryTypedDict], -) - - -MessageEntries = TypeAliasType( - "MessageEntries", Union[MessageInputEntry, MessageOutputEntry] -) diff --git a/src/mistralai/models/messageinputcontentchunks.py b/src/mistralai/models/messageinputcontentchunks.py deleted file mode 100644 index e90d8aa0..00000000 --- a/src/mistralai/models/messageinputcontentchunks.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -MessageInputContentChunksTypedDict = TypeAliasType( - "MessageInputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ToolFileChunkTypedDict, - ], -) - - -MessageInputContentChunks = TypeAliasType( - "MessageInputContentChunks", - Union[TextChunk, ImageURLChunk, DocumentURLChunk, ThinkChunk, ToolFileChunk], -) diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py deleted file mode 100644 index edf05631..00000000 --- a/src/mistralai/models/messageinputentry.py +++ /dev/null @@ -1,105 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .messageinputcontentchunks import ( - MessageInputContentChunks, - MessageInputContentChunksTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -Object = Literal["entry",] - - -MessageInputEntryType = Literal["message.input",] - - -MessageInputEntryRole = Literal[ - "assistant", - "user", -] - - -MessageInputEntryContentTypedDict = TypeAliasType( - "MessageInputEntryContentTypedDict", - Union[str, List[MessageInputContentChunksTypedDict]], -) - - -MessageInputEntryContent = TypeAliasType( - "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] -) - - -class MessageInputEntryTypedDict(TypedDict): - r"""Representation of an input message inside the conversation.""" - - role: MessageInputEntryRole - content: MessageInputEntryContentTypedDict - object: NotRequired[Object] - type: NotRequired[MessageInputEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - prefix: NotRequired[bool] - - -class MessageInputEntry(BaseModel): - r"""Representation of an input message inside the conversation.""" - - role: MessageInputEntryRole - - content: MessageInputEntryContent - - object: Optional[Object] = "entry" - - type: Optional[MessageInputEntryType] = "message.input" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - prefix: Optional[bool] = False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "prefix", - ] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/messageoutputcontentchunks.py b/src/mistralai/models/messageoutputcontentchunks.py deleted file mode 100644 index 136a7608..00000000 --- a/src/mistralai/models/messageoutputcontentchunks.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -MessageOutputContentChunksTypedDict = TypeAliasType( - "MessageOutputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ToolFileChunkTypedDict, - ToolReferenceChunkTypedDict, - ], -) - - -MessageOutputContentChunks = TypeAliasType( - "MessageOutputContentChunks", - Union[ - TextChunk, - ImageURLChunk, - DocumentURLChunk, - ThinkChunk, - ToolFileChunk, - ToolReferenceChunk, - ], -) diff --git a/src/mistralai/models/messageoutputentry.py b/src/mistralai/models/messageoutputentry.py deleted file mode 100644 index 0e2df81e..00000000 --- a/src/mistralai/models/messageoutputentry.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .messageoutputcontentchunks import ( - MessageOutputContentChunks, - MessageOutputContentChunksTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEntryObject = Literal["entry",] - - -MessageOutputEntryType = Literal["message.output",] - - -MessageOutputEntryRole = Literal["assistant",] - - -MessageOutputEntryContentTypedDict = TypeAliasType( - "MessageOutputEntryContentTypedDict", - Union[str, List[MessageOutputContentChunksTypedDict]], -) - - -MessageOutputEntryContent = TypeAliasType( - "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] -) - - -class MessageOutputEntryTypedDict(TypedDict): - content: MessageOutputEntryContentTypedDict - object: NotRequired[MessageOutputEntryObject] - type: NotRequired[MessageOutputEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - agent_id: NotRequired[Nullable[str]] - model: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEntryRole] - - -class MessageOutputEntry(BaseModel): - content: MessageOutputEntryContent - - object: Optional[MessageOutputEntryObject] = "entry" - - type: Optional[MessageOutputEntryType] = "message.output" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - agent_id: OptionalNullable[str] = UNSET - - model: OptionalNullable[str] = UNSET - - role: Optional[MessageOutputEntryRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "agent_id", - "model", - "role", - ] - nullable_fields = ["completed_at", "agent_id", "model"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - 
optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/messageoutputevent.py b/src/mistralai/models/messageoutputevent.py deleted file mode 100644 index 751767a3..00000000 --- a/src/mistralai/models/messageoutputevent.py +++ /dev/null @@ -1,95 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEventType = Literal["message.output.delta",] - - -MessageOutputEventRole = Literal["assistant",] - - -MessageOutputEventContentTypedDict = TypeAliasType( - "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] -) - - -MessageOutputEventContent = TypeAliasType( - "MessageOutputEventContent", Union[str, OutputContentChunks] -) - - -class MessageOutputEventTypedDict(TypedDict): - id: str - content: MessageOutputEventContentTypedDict - type: NotRequired[MessageOutputEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - content_index: NotRequired[int] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEventRole] - - -class MessageOutputEvent(BaseModel): - id: str - - content: MessageOutputEventContent - - type: Optional[MessageOutputEventType] = "message.output.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 - - content_index: Optional[int] = 0 - - model: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - role: Optional[MessageOutputEventRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "type", - "created_at", - "output_index", - "content_index", - "model", - "agent_id", - "role", - ] - nullable_fields = ["model", "agent_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py deleted file mode 100644 index 930b5c21..00000000 --- a/src/mistralai/models/metricout.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class MetricOutTypedDict(TypedDict): - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - train_loss: NotRequired[Nullable[float]] - valid_loss: NotRequired[Nullable[float]] - valid_mean_token_accuracy: NotRequired[Nullable[float]] - - -class MetricOut(BaseModel): - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - train_loss: OptionalNullable[float] = UNSET - - valid_loss: OptionalNullable[float] = UNSET - - valid_mean_token_accuracy: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/mistralerror.py b/src/mistralai/models/mistralerror.py deleted file mode 100644 index 28cfd22d..00000000 --- a/src/mistralai/models/mistralerror.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import httpx -from typing import Optional -from dataclasses import dataclass, field - - -@dataclass(unsafe_hash=True) -class MistralError(Exception): - """The base class for all HTTP error responses.""" - - message: str - status_code: int - body: str - headers: httpx.Headers = field(hash=False) - raw_response: httpx.Response = field(hash=False) - - def __init__( - self, message: str, raw_response: httpx.Response, body: Optional[str] = None - ): - object.__setattr__(self, "message", message) - object.__setattr__(self, "status_code", raw_response.status_code) - object.__setattr__( - self, "body", body if body is not None else raw_response.text - ) - object.__setattr__(self, "headers", raw_response.headers) - object.__setattr__(self, "raw_response", raw_response) - - def __str__(self): - return self.message diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py deleted file mode 100644 index dfb6f2d2..00000000 --- a/src/mistralai/models/mistralpromptmode.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] -r"""Available options to the prompt_mode argument on the chat completion endpoint. 
-Values represent high-level intent. Assignment to actual SPs is handled internally. -System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. -""" diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py deleted file mode 100644 index 6edf8e5b..00000000 --- a/src/mistralai/models/modelcapabilities.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ModelCapabilitiesTypedDict(TypedDict): - completion_chat: NotRequired[bool] - function_calling: NotRequired[bool] - completion_fim: NotRequired[bool] - fine_tuning: NotRequired[bool] - vision: NotRequired[bool] - ocr: NotRequired[bool] - classification: NotRequired[bool] - moderation: NotRequired[bool] - audio: NotRequired[bool] - audio_transcription: NotRequired[bool] - - -class ModelCapabilities(BaseModel): - completion_chat: Optional[bool] = False - - function_calling: Optional[bool] = False - - completion_fim: Optional[bool] = False - - fine_tuning: Optional[bool] = False - - vision: Optional[bool] = False - - ocr: Optional[bool] = False - - classification: Optional[bool] = False - - moderation: Optional[bool] = False - - audio: Optional[bool] = False - - audio_transcription: Optional[bool] = False diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py deleted file mode 100644 index 8eca4f97..00000000 --- a/src/mistralai/models/modelconversation.py +++ /dev/null @@ -1,133 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ModelConversationToolsTypedDict = TypeAliasType( - "ModelConversationToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -ModelConversationTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -ModelConversationObject = Literal["conversation",] - - -class ModelConversationTypedDict(TypedDict): - id: str - created_at: datetime - updated_at: datetime - model: str - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[ModelConversationToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - name: NotRequired[Nullable[str]] - r"""Name given to the conversation.""" - description: NotRequired[Nullable[str]] - r"""Description of the what the conversation is about.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - r"""Custom metadata for the conversation.""" - object: NotRequired[ModelConversationObject] - - -class ModelConversation(BaseModel): - id: str - - created_at: datetime - - updated_at: datetime - - model: str - - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[ModelConversationTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - name: OptionalNullable[str] = UNSET - r"""Name given to the conversation.""" - - description: OptionalNullable[str] = UNSET - r"""Description of the what the conversation is about.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - r"""Custom metadata for the conversation.""" - - object: Optional[ModelConversationObject] = "conversation" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - 
"name", - "description", - "metadata", - "object", - ] - nullable_fields = ["instructions", "name", "description", "metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py deleted file mode 100644 index 394cb3fa..00000000 --- a/src/mistralai/models/modellist.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict -from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -DataTypedDict = TypeAliasType( - "DataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] -) - - -Data = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class ModelListTypedDict(TypedDict): - object: NotRequired[str] - data: NotRequired[List[DataTypedDict]] - - -class ModelList(BaseModel): - object: Optional[str] = "list" - - data: Optional[List[Data]] = None diff --git a/src/mistralai/models/moderationobject.py b/src/mistralai/models/moderationobject.py deleted file mode 100644 index 5eff2d2a..00000000 --- a/src/mistralai/models/moderationobject.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class ModerationObjectTypedDict(TypedDict): - categories: NotRequired[Dict[str, bool]] - r"""Moderation result thresholds""" - category_scores: NotRequired[Dict[str, float]] - r"""Moderation result""" - - -class ModerationObject(BaseModel): - categories: Optional[Dict[str, bool]] = None - r"""Moderation result thresholds""" - - category_scores: Optional[Dict[str, float]] = None - r"""Moderation result""" diff --git a/src/mistralai/models/moderationresponse.py b/src/mistralai/models/moderationresponse.py deleted file mode 100644 index ed13cd6b..00000000 --- a/src/mistralai/models/moderationresponse.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .moderationobject import ModerationObject, ModerationObjectTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ModerationResponseTypedDict(TypedDict): - id: str - model: str - results: List[ModerationObjectTypedDict] - - -class ModerationResponse(BaseModel): - id: str - - model: str - - results: List[ModerationObject] diff --git a/src/mistralai/models/no_response_error.py b/src/mistralai/models/no_response_error.py deleted file mode 100644 index 1deab64b..00000000 --- a/src/mistralai/models/no_response_error.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from dataclasses import dataclass - - -@dataclass(unsafe_hash=True) -class NoResponseError(Exception): - """Error raised when no HTTP response is received from the server.""" - - message: str - - def __init__(self, message: str = "No response received"): - object.__setattr__(self, "message", message) - super().__init__(message) - - def __str__(self): - return self.message diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py deleted file mode 100644 index cec0acf4..00000000 --- a/src/mistralai/models/ocrimageobject.py +++ /dev/null @@ -1,83 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class OCRImageObjectTypedDict(TypedDict): - id: str - r"""Image ID for extracted image in a page""" - top_left_x: Nullable[int] - r"""X coordinate of top-left corner of the extracted image""" - top_left_y: Nullable[int] - r"""Y coordinate of top-left corner of the extracted image""" - bottom_right_x: Nullable[int] - r"""X coordinate of bottom-right corner of the extracted image""" - bottom_right_y: Nullable[int] - r"""Y coordinate of bottom-right corner of the extracted image""" - image_base64: NotRequired[Nullable[str]] - r"""Base64 string of the extracted image""" - image_annotation: NotRequired[Nullable[str]] - r"""Annotation of the extracted image in json str""" - - -class OCRImageObject(BaseModel): - id: str - r"""Image ID for extracted image in a page""" - - top_left_x: Nullable[int] - r"""X coordinate of top-left corner of the extracted image""" - - top_left_y: Nullable[int] - r"""Y coordinate of top-left corner of the extracted image""" - - bottom_right_x: Nullable[int] - r"""X coordinate of bottom-right corner of the extracted image""" - - bottom_right_y: Nullable[int] - r"""Y coordinate of bottom-right corner of the extracted image""" - - image_base64: OptionalNullable[str] = UNSET - r"""Base64 string of the extracted image""" - - image_annotation: OptionalNullable[str] = UNSET - r"""Annotation of the extracted image in json str""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["image_base64", "image_annotation"] - nullable_fields = [ - "top_left_x", - "top_left_y", - "bottom_right_x", - "bottom_right_y", - "image_base64", - "image_annotation", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, 
None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrpagedimensions.py b/src/mistralai/models/ocrpagedimensions.py deleted file mode 100644 index d1aeb54d..00000000 --- a/src/mistralai/models/ocrpagedimensions.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class OCRPageDimensionsTypedDict(TypedDict): - dpi: int - r"""Dots per inch of the page-image""" - height: int - r"""Height of the image in pixels""" - width: int - r"""Width of the image in pixels""" - - -class OCRPageDimensions(BaseModel): - dpi: int - r"""Dots per inch of the page-image""" - - height: int - r"""Height of the image in pixels""" - - width: int - r"""Width of the image in pixels""" diff --git a/src/mistralai/models/ocrpageobject.py b/src/mistralai/models/ocrpageobject.py deleted file mode 100644 index 737defba..00000000 --- a/src/mistralai/models/ocrpageobject.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict -from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict -from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class OCRPageObjectTypedDict(TypedDict): - index: int - r"""The page index in a pdf document starting from 0""" - markdown: str - r"""The markdown string response of the page""" - images: List[OCRImageObjectTypedDict] - r"""List of all extracted images in the page""" - dimensions: Nullable[OCRPageDimensionsTypedDict] - r"""The dimensions of the PDF Page's screenshot image""" - tables: NotRequired[List[OCRTableObjectTypedDict]] - r"""List of all extracted tables in the page""" - hyperlinks: NotRequired[List[str]] - r"""List of all hyperlinks in the page""" - header: NotRequired[Nullable[str]] - r"""Header of the page""" - footer: NotRequired[Nullable[str]] - r"""Footer of the page""" - - -class OCRPageObject(BaseModel): - index: int - r"""The page index in a pdf document starting from 0""" - - markdown: str - r"""The markdown string response of the page""" - - images: List[OCRImageObject] - r"""List of all extracted images in the page""" - - dimensions: Nullable[OCRPageDimensions] - r"""The dimensions of the PDF Page's screenshot image""" - - tables: Optional[List[OCRTableObject]] = None - r"""List of all extracted tables in the page""" - - hyperlinks: Optional[List[str]] = None - r"""List of all hyperlinks in the page""" - - header: OptionalNullable[str] = UNSET - r"""Header of the page""" - - footer: OptionalNullable[str] = UNSET - r"""Footer of the page""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["tables", 
"hyperlinks", "header", "footer"] - nullable_fields = ["header", "footer", "dimensions"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py deleted file mode 100644 index 0e061ac9..00000000 --- a/src/mistralai/models/ocrrequest.py +++ /dev/null @@ -1,140 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .filechunk import FileChunk, FileChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -DocumentTypedDict = TypeAliasType( - "DocumentTypedDict", - Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], -) -r"""Document to run OCR on""" - - -Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) -r"""Document to run OCR on""" - - -TableFormat = Literal[ - "markdown", - "html", -] - - -class OCRRequestTypedDict(TypedDict): - model: Nullable[str] - document: DocumentTypedDict - r"""Document to run OCR on""" - id: NotRequired[str] - pages: NotRequired[Nullable[List[int]]] - r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" - include_image_base64: NotRequired[Nullable[bool]] - r"""Include image URLs in response""" - image_limit: NotRequired[Nullable[int]] - r"""Max images to extract""" - image_min_size: NotRequired[Nullable[int]] - r"""Minimum height and width of image to extract""" - bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] - r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" - document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] - r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" - document_annotation_prompt: NotRequired[Nullable[str]] - r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" - table_format: NotRequired[Nullable[TableFormat]] - extract_header: NotRequired[bool] - extract_footer: NotRequired[bool] - - -class OCRRequest(BaseModel): - model: Nullable[str] - - document: Document - r"""Document to run OCR on""" - - id: Optional[str] = None - - pages: OptionalNullable[List[int]] = UNSET - r"""Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0""" - - include_image_base64: OptionalNullable[bool] = UNSET - r"""Include image URLs in response""" - - image_limit: OptionalNullable[int] = UNSET - r"""Max images to extract""" - - image_min_size: OptionalNullable[int] = UNSET - r"""Minimum height and width of image to extract""" - - bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET - r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" - - document_annotation_format: OptionalNullable[ResponseFormat] = UNSET - r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" - - document_annotation_prompt: OptionalNullable[str] = UNSET - r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" - - table_format: OptionalNullable[TableFormat] = UNSET - - extract_header: Optional[bool] = None - - extract_footer: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "id", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - "extract_header", - "extract_footer", - ] - nullable_fields = [ - "model", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py deleted file mode 100644 index 7b65bee7..00000000 --- a/src/mistralai/models/ocrresponse.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
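OCRRequest above is the request body for the OCR endpoint. A hedged usage sketch, assuming the SDK's client.ocr.process entry point; the document URL is a placeholder:

import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

ocr_response = client.ocr.process(
    model="mistral-ocr-latest",
    document={
        "type": "document_url",
        "document_url": "https://example.com/sample.pdf",  # placeholder URL
    },
    pages=[0, 1],              # zero-indexed, per the field docstring above
    include_image_base64=False,
)

for page in ocr_response.pages:
    print(page.index, page.markdown[:80])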
DO NOT EDIT.""" - -from __future__ import annotations -from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict -from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List -from typing_extensions import NotRequired, TypedDict - - -class OCRResponseTypedDict(TypedDict): - pages: List[OCRPageObjectTypedDict] - r"""List of OCR info for pages.""" - model: str - r"""The model used to generate the OCR.""" - usage_info: OCRUsageInfoTypedDict - document_annotation: NotRequired[Nullable[str]] - r"""Formatted response in the request_format if provided in json str""" - - -class OCRResponse(BaseModel): - pages: List[OCRPageObject] - r"""List of OCR info for pages.""" - - model: str - r"""The model used to generate the OCR.""" - - usage_info: OCRUsageInfo - - document_annotation: OptionalNullable[str] = UNSET - r"""Formatted response in the request_format if provided in json str""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["document_annotation"] - nullable_fields = ["document_annotation"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrtableobject.py b/src/mistralai/models/ocrtableobject.py deleted file mode 100644 index 5f30ab5e..00000000 --- a/src/mistralai/models/ocrtableobject.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from typing import Literal -from typing_extensions import Annotated, TypedDict - - -Format = Literal[ - "markdown", - "html", -] -r"""Format of the table""" - - -class OCRTableObjectTypedDict(TypedDict): - id: str - r"""Table ID for extracted table in a page""" - content: str - r"""Content of the table in the given format""" - format_: Format - r"""Format of the table""" - - -class OCRTableObject(BaseModel): - id: str - r"""Table ID for extracted table in a page""" - - content: str - r"""Content of the table in the given format""" - - format_: Annotated[Format, pydantic.Field(alias="format")] - r"""Format of the table""" diff --git a/src/mistralai/models/ocrusageinfo.py b/src/mistralai/models/ocrusageinfo.py deleted file mode 100644 index 36c9f826..00000000 --- a/src/mistralai/models/ocrusageinfo.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class OCRUsageInfoTypedDict(TypedDict): - pages_processed: int - r"""Number of pages processed""" - doc_size_bytes: NotRequired[Nullable[int]] - r"""Document size in bytes""" - - -class OCRUsageInfo(BaseModel): - pages_processed: int - r"""Number of pages processed""" - - doc_size_bytes: OptionalNullable[int] = UNSET - r"""Document size in bytes""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["doc_size_bytes"] - nullable_fields = ["doc_size_bytes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/outputcontentchunks.py b/src/mistralai/models/outputcontentchunks.py deleted file mode 100644 index ad0c087e..00000000 --- a/src/mistralai/models/outputcontentchunks.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -OutputContentChunksTypedDict = TypeAliasType( - "OutputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ToolFileChunkTypedDict, - ToolReferenceChunkTypedDict, - ], -) - - -OutputContentChunks = TypeAliasType( - "OutputContentChunks", - Union[ - TextChunk, - ImageURLChunk, - DocumentURLChunk, - ThinkChunk, - ToolFileChunk, - ToolReferenceChunk, - ], -) diff --git a/src/mistralai/models/paginationinfo.py b/src/mistralai/models/paginationinfo.py deleted file mode 100644 index 00d4f1ec..00000000 --- a/src/mistralai/models/paginationinfo.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
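OCRUsageInfo closes with the same serialize_model hook that repeats throughout these deleted models. The hook implements a three-state field convention: a field left at the UNSET sentinel is omitted from output, a field explicitly assigned (even to None) is emitted, and plain values pass through. A simplified sketch of that intent, with a local sentinel standing in for UNSET_SENTINEL:

from typing import Any

from pydantic import BaseModel, model_serializer

UNSET = object()  # local stand-in for the SDK's UNSET_SENTINEL


class UsageSketch(BaseModel):
    pages_processed: int
    doc_size_bytes: Any = UNSET  # optional *and* nullable in the real model

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        data = handler(self)
        out = {}
        for key, val in data.items():
            if val is UNSET:
                continue  # never assigned: drop the key entirely
            out[key] = val  # assigned, possibly to None: keep it (null in JSON)
        return out


print(UsageSketch(pages_processed=3).model_dump())
# -> {'pages_processed': 3}
print(UsageSketch(pages_processed=3, doc_size_bytes=None).model_dump())
# -> {'pages_processed': 3, 'doc_size_bytes': None}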
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class PaginationInfoTypedDict(TypedDict): - total_items: int - total_pages: int - current_page: int - page_size: int - has_more: bool - - -class PaginationInfo(BaseModel): - total_items: int - - total_pages: int - - current_page: int - - page_size: int - - has_more: bool diff --git a/src/mistralai/models/prediction.py b/src/mistralai/models/prediction.py deleted file mode 100644 index 582d8789..00000000 --- a/src/mistralai/models/prediction.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/src/mistralai/models/processingstatusout.py b/src/mistralai/models/processingstatusout.py deleted file mode 100644 index e67bfa86..00000000 --- a/src/mistralai/models/processingstatusout.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class ProcessingStatusOutTypedDict(TypedDict): - document_id: str - processing_status: str - - -class ProcessingStatusOut(BaseModel): - document_id: str - - processing_status: str diff --git a/src/mistralai/models/realtimetranscriptionerror.py b/src/mistralai/models/realtimetranscriptionerror.py deleted file mode 100644 index 0785f700..00000000 --- a/src/mistralai/models/realtimetranscriptionerror.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .realtimetranscriptionerrordetail import ( - RealtimeTranscriptionErrorDetail, - RealtimeTranscriptionErrorDetailTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class RealtimeTranscriptionErrorTypedDict(TypedDict): - error: RealtimeTranscriptionErrorDetailTypedDict - type: Literal["error"] - - -class RealtimeTranscriptionError(BaseModel): - error: RealtimeTranscriptionErrorDetail - - TYPE: Annotated[ - Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], - pydantic.Field(alias="type"), - ] = "error" diff --git a/src/mistralai/models/realtimetranscriptionerrordetail.py b/src/mistralai/models/realtimetranscriptionerrordetail.py deleted file mode 100644 index cb5d73f8..00000000 --- a/src/mistralai/models/realtimetranscriptionerrordetail.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType, TypedDict - - -MessageTypedDict = TypeAliasType("MessageTypedDict", Union[str, Dict[str, Any]]) -r"""Human-readable error message.""" - - -Message = TypeAliasType("Message", Union[str, Dict[str, Any]]) -r"""Human-readable error message.""" - - -class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): - message: MessageTypedDict - r"""Human-readable error message.""" - code: int - r"""Internal error code for debugging.""" - - -class RealtimeTranscriptionErrorDetail(BaseModel): - message: Message - r"""Human-readable error message.""" - - code: int - r"""Internal error code for debugging.""" diff --git a/src/mistralai/models/realtimetranscriptionsession.py b/src/mistralai/models/realtimetranscriptionsession.py deleted file mode 100644 index bcd0cfe3..00000000 --- a/src/mistralai/models/realtimetranscriptionsession.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .audioformat import AudioFormat, AudioFormatTypedDict -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class RealtimeTranscriptionSessionTypedDict(TypedDict): - request_id: str - model: str - audio_format: AudioFormatTypedDict - - -class RealtimeTranscriptionSession(BaseModel): - request_id: str - - model: str - - audio_format: AudioFormat diff --git a/src/mistralai/models/realtimetranscriptionsessioncreated.py b/src/mistralai/models/realtimetranscriptionsessioncreated.py deleted file mode 100644 index 9a2c2860..00000000 --- a/src/mistralai/models/realtimetranscriptionsessioncreated.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
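The realtime transcription events that follow (error, session.created, session.updated) all pin their type field with mistralai.utils.validate_const: an aliased, optional Literal that defaults to the event tag and rejects any other value. A standalone sketch, reimplementing validate_const locally for illustration:

from typing import Literal, Optional

from pydantic import BaseModel, Field
from pydantic.functional_validators import AfterValidator
from typing_extensions import Annotated


def validate_const(expected):
    # Stand-in for mistralai.utils.validate_const: reject any value other
    # than the declared constant.
    def check(value):
        if value != expected:
            raise ValueError(f"expected {expected!r}, got {value!r}")
        return value
    return check


class SessionCreated(BaseModel):
    TYPE: Annotated[
        Annotated[
            Optional[Literal["session.created"]],
            AfterValidator(validate_const("session.created")),
        ],
        Field(alias="type"),
    ] = "session.created"


print(SessionCreated().TYPE)                        # "session.created"
print(SessionCreated(type="session.created").TYPE)  # alias populates the field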
DO NOT EDIT.""" - -from __future__ import annotations -from .realtimetranscriptionsession import ( - RealtimeTranscriptionSession, - RealtimeTranscriptionSessionTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): - session: RealtimeTranscriptionSessionTypedDict - type: Literal["session.created"] - - -class RealtimeTranscriptionSessionCreated(BaseModel): - session: RealtimeTranscriptionSession - - TYPE: Annotated[ - Annotated[ - Optional[Literal["session.created"]], - AfterValidator(validate_const("session.created")), - ], - pydantic.Field(alias="type"), - ] = "session.created" diff --git a/src/mistralai/models/realtimetranscriptionsessionupdated.py b/src/mistralai/models/realtimetranscriptionsessionupdated.py deleted file mode 100644 index ad1b5133..00000000 --- a/src/mistralai/models/realtimetranscriptionsessionupdated.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .realtimetranscriptionsession import ( - RealtimeTranscriptionSession, - RealtimeTranscriptionSessionTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): - session: RealtimeTranscriptionSessionTypedDict - type: Literal["session.updated"] - - -class RealtimeTranscriptionSessionUpdated(BaseModel): - session: RealtimeTranscriptionSession - - TYPE: Annotated[ - Annotated[ - Optional[Literal["session.updated"]], - AfterValidator(validate_const("session.updated")), - ], - pydantic.Field(alias="type"), - ] = "session.updated" diff --git a/src/mistralai/models/referencechunk.py b/src/mistralai/models/referencechunk.py deleted file mode 100644 index 1864ac79..00000000 --- a/src/mistralai/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference",] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/src/mistralai/models/requestsource.py b/src/mistralai/models/requestsource.py deleted file mode 100644 index 7b0a35c4..00000000 --- a/src/mistralai/models/requestsource.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -RequestSource = Literal[ - "api", - "playground", - "agent_builder_v1", -] diff --git a/src/mistralai/models/responsedoneevent.py b/src/mistralai/models/responsedoneevent.py deleted file mode 100644 index 5a3a3dfb..00000000 --- a/src/mistralai/models/responsedoneevent.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseDoneEventType = Literal["conversation.response.done",] - - -class ResponseDoneEventTypedDict(TypedDict): - usage: ConversationUsageInfoTypedDict - type: NotRequired[ResponseDoneEventType] - created_at: NotRequired[datetime] - - -class ResponseDoneEvent(BaseModel): - usage: ConversationUsageInfo - - type: Optional[ResponseDoneEventType] = "conversation.response.done" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responseerrorevent.py b/src/mistralai/models/responseerrorevent.py deleted file mode 100644 index 6cb1b268..00000000 --- a/src/mistralai/models/responseerrorevent.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseErrorEventType = Literal["conversation.response.error",] - - -class ResponseErrorEventTypedDict(TypedDict): - message: str - code: int - type: NotRequired[ResponseErrorEventType] - created_at: NotRequired[datetime] - - -class ResponseErrorEvent(BaseModel): - message: str - - code: int - - type: Optional[ResponseErrorEventType] = "conversation.response.error" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py deleted file mode 100644 index 92284017..00000000 --- a/src/mistralai/models/responseformat.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .responseformats import ResponseFormats -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ResponseFormatTypedDict(TypedDict): - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - type: NotRequired[ResponseFormats] - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] - - -class ResponseFormat(BaseModel): - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - type: Optional[ResponseFormats] = None - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py deleted file mode 100644 index cbf83ce7..00000000 --- a/src/mistralai/models/responseformats.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ResponseFormats = Literal[ - "text", - "json_object", - "json_schema", -] diff --git a/src/mistralai/models/responsestartedevent.py b/src/mistralai/models/responsestartedevent.py deleted file mode 100644 index d14d45ef..00000000 --- a/src/mistralai/models/responsestartedevent.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseStartedEventType = Literal["conversation.response.started",] - - -class ResponseStartedEventTypedDict(TypedDict): - conversation_id: str - type: NotRequired[ResponseStartedEventType] - created_at: NotRequired[datetime] - - -class ResponseStartedEvent(BaseModel): - conversation_id: str - - type: Optional[ResponseStartedEventType] = "conversation.response.started" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responsevalidationerror.py b/src/mistralai/models/responsevalidationerror.py deleted file mode 100644 index ed301655..00000000 --- a/src/mistralai/models/responsevalidationerror.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
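ResponseFormat above is what flows into the response_format request field. A hedged usage sketch of JSON-schema mode, assuming the SDK's chat.complete entry point; the schema itself is invented for the example:

import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.chat.complete(
    model="mistral-small-latest",
    messages=[
        {"role": "user", "content": "Name one EU capital and its country as JSON."},
    ],
    response_format={
        "type": "json_schema",  # one of the ResponseFormats literals below
        "json_schema": {
            "name": "capital",
            "schema": {
                "type": "object",
                "properties": {
                    "city": {"type": "string"},
                    "country": {"type": "string"},
                },
                "required": ["city", "country"],
                "additionalProperties": False,
            },
            "strict": True,
        },
    },
)

print(res.choices[0].message.content)  # JSON conforming to the schema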
DO NOT EDIT.""" - -import httpx -from typing import Optional -from dataclasses import dataclass - -from mistralai.models import MistralError - - -@dataclass(unsafe_hash=True) -class ResponseValidationError(MistralError): - """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" - - def __init__( - self, - message: str, - raw_response: httpx.Response, - cause: Exception, - body: Optional[str] = None, - ): - message = f"{message}: {cause}" - super().__init__(message, raw_response, body) - - @property - def cause(self): - """Normally the Pydantic ValidationError""" - return self.__cause__ diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py deleted file mode 100644 index bfe62474..00000000 --- a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict -from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to retrieve.""" - - -class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to retrieve.""" - - -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", - Union[BaseModelCardTypedDict, FTModelCardTypedDict], -) -r"""Successful Response""" - - -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] -r"""Successful Response""" diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py deleted file mode 100644 index 94540083..00000000 --- a/src/mistralai/models/retrievefileout.py +++ /dev/null @@ -1,91 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict - - -class RetrieveFileOutTypedDict(TypedDict): - id: str - r"""The unique identifier of the file.""" - object: str - r"""The object type, which is always \"file\".""" - size_bytes: int - r"""The size of the file, in bytes.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - filename: str - r"""The name of the uploaded file.""" - purpose: FilePurpose - sample_type: SampleType - source: Source - deleted: bool - num_lines: NotRequired[Nullable[int]] - mimetype: NotRequired[Nullable[str]] - signature: NotRequired[Nullable[str]] - - -class RetrieveFileOut(BaseModel): - id: str - r"""The unique identifier of the file.""" - - object: str - r"""The object type, which is always \"file\".""" - - size_bytes: Annotated[int, pydantic.Field(alias="bytes")] - r"""The size of the file, in bytes.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - filename: str - r"""The name of the uploaded file.""" - - purpose: FilePurpose - - sample_type: SampleType - - source: Source - - deleted: bool - - num_lines: OptionalNullable[int] = UNSET - - mimetype: OptionalNullable[str] = UNSET - - signature: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py deleted file mode 100644 index efb43e9b..00000000 --- a/src/mistralai/models/sampletype.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -SampleType = Union[ - Literal[ - "pretrain", - "instruct", - "batch_request", - "batch_result", - "batch_error", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/sdkerror.py b/src/mistralai/models/sdkerror.py deleted file mode 100644 index 65c45cf1..00000000 --- a/src/mistralai/models/sdkerror.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import httpx -from typing import Optional -from dataclasses import dataclass - -from mistralai.models import MistralError - -MAX_MESSAGE_LEN = 10_000 - - -@dataclass(unsafe_hash=True) -class SDKError(MistralError): - """The fallback error class if no more specific error class is matched.""" - - def __init__( - self, message: str, raw_response: httpx.Response, body: Optional[str] = None - ): - body_display = body or raw_response.text or '""' - - if message: - message += ": " - message += f"Status {raw_response.status_code}" - - headers = raw_response.headers - content_type = headers.get("content-type", '""') - if content_type != "application/json": - if " " in content_type: - content_type = f'"{content_type}"' - message += f" Content-Type {content_type}" - - if len(body_display) > MAX_MESSAGE_LEN: - truncated = body_display[:MAX_MESSAGE_LEN] - remaining = len(body_display) - MAX_MESSAGE_LEN - body_display = f"{truncated}...and {remaining} more chars" - - message += f". Body: {body_display}" - message = message.strip() - - super().__init__(message, raw_response, body) diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py deleted file mode 100644 index cf05ba8f..00000000 --- a/src/mistralai/models/security.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, SecurityMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class SecurityTypedDict(TypedDict): - api_key: NotRequired[str] - - -class Security(BaseModel): - api_key: Annotated[ - Optional[str], - FieldMetadata( - security=SecurityMetadata( - scheme=True, - scheme_type="http", - sub_type="bearer", - field_name="Authorization", - ) - ), - ] = None diff --git a/src/mistralai/models/shareenum.py b/src/mistralai/models/shareenum.py deleted file mode 100644 index 634ba4b7..00000000 --- a/src/mistralai/models/shareenum.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -ShareEnum = Union[ - Literal[ - "Viewer", - "Editor", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/sharingdelete.py b/src/mistralai/models/sharingdelete.py deleted file mode 100644 index ebcdbab5..00000000 --- a/src/mistralai/models/sharingdelete.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
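SDKError above is the fallback error class, folding the status code, any non-JSON content type, and a body preview capped at MAX_MESSAGE_LEN into a single message. A hedged handling sketch, assuming MistralError's usual message and raw_response attributes:

import os

from mistralai import Mistral
from mistralai.models import SDKError

client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY", "bad-key"))

try:
    client.models.retrieve(model_id="definitely-not-a-model")
except SDKError as err:
    # The message already embeds the status code, a quoted non-JSON
    # content type if any, and the truncated response body.
    print(err.message)
    print(err.raw_response.status_code)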
DO NOT EDIT.""" - -from __future__ import annotations -from .entitytype import EntityType -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class SharingDeleteTypedDict(TypedDict): - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - org_id: NotRequired[Nullable[str]] - - -class SharingDelete(BaseModel): - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - - org_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["org_id"] - nullable_fields = ["org_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/sharingin.py b/src/mistralai/models/sharingin.py deleted file mode 100644 index f7bb89ca..00000000 --- a/src/mistralai/models/sharingin.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .entitytype import EntityType -from .shareenum import ShareEnum -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class SharingInTypedDict(TypedDict): - level: ShareEnum - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - org_id: NotRequired[Nullable[str]] - - -class SharingIn(BaseModel): - level: ShareEnum - - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - - org_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["org_id"] - nullable_fields = ["org_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/sharingout.py b/src/mistralai/models/sharingout.py deleted file mode 100644 index 12455818..00000000 --- a/src/mistralai/models/sharingout.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class SharingOutTypedDict(TypedDict): - library_id: str - org_id: str - role: str - share_with_type: str - share_with_uuid: Nullable[str] - user_id: NotRequired[Nullable[str]] - - -class SharingOut(BaseModel): - library_id: str - - org_id: str - - role: str - - share_with_type: str - - share_with_uuid: Nullable[str] - - user_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["user_id"] - nullable_fields = ["user_id", "share_with_uuid"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py deleted file mode 100644 index cc3abce2..00000000 --- a/src/mistralai/models/source.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -Source = Union[ - Literal[ - "upload", - "repository", - "mistral", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/ssetypes.py b/src/mistralai/models/ssetypes.py deleted file mode 100644 index 796f0327..00000000 --- a/src/mistralai/models/ssetypes.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -SSETypes = Literal[ - "conversation.response.started", - "conversation.response.done", - "conversation.response.error", - "message.output.delta", - "tool.execution.started", - "tool.execution.delta", - "tool.execution.done", - "agent.handoff.started", - "agent.handoff.done", - "function.call.delta", -] -r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py deleted file mode 100644 index 2b34607b..00000000 --- a/src/mistralai/models/systemmessage.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .systemmessagecontentchunks import ( - SystemMessageContentChunks, - SystemMessageContentChunksTypedDict, -) -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", - Union[str, List[SystemMessageContentChunksTypedDict]], -) - - -SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] -) - - -Role = Literal["system",] - - -class SystemMessageTypedDict(TypedDict): - content: SystemMessageContentTypedDict - role: NotRequired[Role] - - -class SystemMessage(BaseModel): - content: SystemMessageContent - - role: Optional[Role] = "system" diff --git a/src/mistralai/models/systemmessagecontentchunks.py b/src/mistralai/models/systemmessagecontentchunks.py deleted file mode 100644 index a1f04d1e..00000000 --- a/src/mistralai/models/systemmessagecontentchunks.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -SystemMessageContentChunksTypedDict = TypeAliasType( - "SystemMessageContentChunksTypedDict", - Union[TextChunkTypedDict, ThinkChunkTypedDict], -) - - -SystemMessageContentChunks = Annotated[ - Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py deleted file mode 100644 index 6052686e..00000000 --- a/src/mistralai/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TextChunkType = Literal["text",] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[TextChunkType] - - -class TextChunk(BaseModel): - text: str - - type: Optional[TextChunkType] = "text" diff --git a/src/mistralai/models/thinkchunk.py b/src/mistralai/models/thinkchunk.py deleted file mode 100644 index 627ae488..00000000 --- a/src/mistralai/models/thinkchunk.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ThinkingTypedDict = TypeAliasType( - "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] -) - - -Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) - - -ThinkChunkType = Literal["thinking",] - - -class ThinkChunkTypedDict(TypedDict): - thinking: List[ThinkingTypedDict] - closed: NotRequired[bool] - r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] - - -class ThinkChunk(BaseModel): - thinking: List[Thinking] - - closed: Optional[bool] = None - r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - - type: Optional[ThinkChunkType] = "thinking" diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py deleted file mode 100644 index 5bda890f..00000000 --- a/src/mistralai/models/timestampgranularity.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -TimestampGranularity = Literal[ - "segment", - "word", -] diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py deleted file mode 100644 index b14a6adf..00000000 --- a/src/mistralai/models/tool.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .function import Function, FunctionTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ToolTypedDict(TypedDict): - function: FunctionTypedDict - type: NotRequired[ToolTypes] - - -class Tool(BaseModel): - function: Function - - type: Optional[ToolTypes] = None diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py deleted file mode 100644 index 1f367924..00000000 --- a/src/mistralai/models/toolcall.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functioncall import FunctionCall, FunctionCallTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ToolCallTypedDict(TypedDict): - function: FunctionCallTypedDict - id: NotRequired[str] - type: NotRequired[ToolTypes] - index: NotRequired[int] - - -class ToolCall(BaseModel): - function: FunctionCall - - id: Optional[str] = "null" - - type: Optional[ToolTypes] = None - - index: Optional[int] = 0 diff --git a/src/mistralai/models/toolchoice.py b/src/mistralai/models/toolchoice.py deleted file mode 100644 index f8e1b486..00000000 --- a/src/mistralai/models/toolchoice.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .functionname import FunctionName, FunctionNameTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ToolChoiceTypedDict(TypedDict): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionNameTypedDict - r"""this restriction of `Function` is used to select a specific function to call""" - type: NotRequired[ToolTypes] - - -class ToolChoice(BaseModel): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionName - r"""this restriction of `Function` is used to select a specific function to call""" - - type: Optional[ToolTypes] = None diff --git a/src/mistralai/models/toolchoiceenum.py b/src/mistralai/models/toolchoiceenum.py deleted file mode 100644 index 01f6f677..00000000 --- a/src/mistralai/models/toolchoiceenum.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ToolChoiceEnum = Literal[ - "auto", - "none", - "any", - "required", -] diff --git a/src/mistralai/models/toolexecutiondeltaevent.py b/src/mistralai/models/toolexecutiondeltaevent.py deleted file mode 100644 index 4fca46a8..00000000 --- a/src/mistralai/models/toolexecutiondeltaevent.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
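ToolCall, ToolChoice, and ToolChoiceEnum above form the function-calling surface. A hedged round-trip sketch, assuming chat.complete accepts OpenAI-style tool dicts; get_weather is a hypothetical tool:

import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical function
                "description": "Look up current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    tool_choice="any",  # one of the ToolChoiceEnum values above
)

call = res.choices[0].message.tool_calls[0]
print(call.id, call.function.name, call.function.arguments)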
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionDeltaEventType = Literal["tool.execution.delta",] - - -ToolExecutionDeltaEventNameTypedDict = TypeAliasType( - "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str] -) - - -ToolExecutionDeltaEventName = TypeAliasType( - "ToolExecutionDeltaEventName", Union[BuiltInConnectors, str] -) - - -class ToolExecutionDeltaEventTypedDict(TypedDict): - id: str - name: ToolExecutionDeltaEventNameTypedDict - arguments: str - type: NotRequired[ToolExecutionDeltaEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class ToolExecutionDeltaEvent(BaseModel): - id: str - - name: ToolExecutionDeltaEventName - - arguments: str - - type: Optional[ToolExecutionDeltaEventType] = "tool.execution.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py deleted file mode 100644 index 621d5571..00000000 --- a/src/mistralai/models/toolexecutiondoneevent.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel -from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionDoneEventType = Literal["tool.execution.done",] - - -ToolExecutionDoneEventNameTypedDict = TypeAliasType( - "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str] -) - - -ToolExecutionDoneEventName = TypeAliasType( - "ToolExecutionDoneEventName", Union[BuiltInConnectors, str] -) - - -class ToolExecutionDoneEventTypedDict(TypedDict): - id: str - name: ToolExecutionDoneEventNameTypedDict - type: NotRequired[ToolExecutionDoneEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - info: NotRequired[Dict[str, Any]] - - -class ToolExecutionDoneEvent(BaseModel): - id: str - - name: ToolExecutionDoneEventName - - type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 - - info: Optional[Dict[str, Any]] = None diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py deleted file mode 100644 index 9f70a63b..00000000 --- a/src/mistralai/models/toolexecutionentry.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py
deleted file mode 100644
index 9f70a63b..00000000
--- a/src/mistralai/models/toolexecutionentry.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .builtinconnectors import BuiltInConnectors
-from datetime import datetime
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import Any, Dict, Literal, Optional, Union
-from typing_extensions import NotRequired, TypeAliasType, TypedDict
-
-
-ToolExecutionEntryObject = Literal["entry",]
-
-
-ToolExecutionEntryType = Literal["tool.execution",]
-
-
-NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str])
-
-
-Name = TypeAliasType("Name", Union[BuiltInConnectors, str])
-
-
-class ToolExecutionEntryTypedDict(TypedDict):
-    name: NameTypedDict
-    arguments: str
-    object: NotRequired[ToolExecutionEntryObject]
-    type: NotRequired[ToolExecutionEntryType]
-    created_at: NotRequired[datetime]
-    completed_at: NotRequired[Nullable[datetime]]
-    id: NotRequired[str]
-    info: NotRequired[Dict[str, Any]]
-
-
-class ToolExecutionEntry(BaseModel):
-    name: Name
-
-    arguments: str
-
-    object: Optional[ToolExecutionEntryObject] = "entry"
-
-    type: Optional[ToolExecutionEntryType] = "tool.execution"
-
-    created_at: Optional[datetime] = None
-
-    completed_at: OptionalNullable[datetime] = UNSET
-
-    id: Optional[str] = None
-
-    info: Optional[Dict[str, Any]] = None
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"]
-        nullable_fields = ["completed_at"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/toolexecutionstartedevent.py b/src/mistralai/models/toolexecutionstartedevent.py
deleted file mode 100644
index 80dd5e97..00000000
--- a/src/mistralai/models/toolexecutionstartedevent.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .builtinconnectors import BuiltInConnectors
-from datetime import datetime
-from mistralai.types import BaseModel
-from typing import Literal, Optional, Union
-from typing_extensions import NotRequired, TypeAliasType, TypedDict
-
-
-ToolExecutionStartedEventType = Literal["tool.execution.started",]
-
-
-ToolExecutionStartedEventNameTypedDict = TypeAliasType(
-    "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str]
-)
-
-
-ToolExecutionStartedEventName = TypeAliasType(
-    "ToolExecutionStartedEventName", Union[BuiltInConnectors, str]
-)
-
-
-class ToolExecutionStartedEventTypedDict(TypedDict):
-    id: str
-    name: ToolExecutionStartedEventNameTypedDict
-    arguments: str
-    type: NotRequired[ToolExecutionStartedEventType]
-    created_at: NotRequired[datetime]
-    output_index: NotRequired[int]
-
-
-class ToolExecutionStartedEvent(BaseModel):
-    id: str
-
-    name: ToolExecutionStartedEventName
-
-    arguments: str
-
-    type: Optional[ToolExecutionStartedEventType] = "tool.execution.started"
-
-    created_at: Optional[datetime] = None
-
-    output_index: Optional[int] = 0
diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py
deleted file mode 100644
index 87bc822c..00000000
--- a/src/mistralai/models/toolfilechunk.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .builtinconnectors import BuiltInConnectors
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import Literal, Optional, Union
-from typing_extensions import NotRequired, TypeAliasType, TypedDict
-
-
-ToolFileChunkType = Literal["tool_file",]
-
-
-ToolFileChunkToolTypedDict = TypeAliasType(
-    "ToolFileChunkToolTypedDict", Union[BuiltInConnectors, str]
-)
-
-
-ToolFileChunkTool = TypeAliasType("ToolFileChunkTool", Union[BuiltInConnectors, str])
-
-
-class ToolFileChunkTypedDict(TypedDict):
-    tool: ToolFileChunkToolTypedDict
-    file_id: str
-    type: NotRequired[ToolFileChunkType]
-    file_name: NotRequired[Nullable[str]]
-    file_type: NotRequired[Nullable[str]]
-
-
-class ToolFileChunk(BaseModel):
-    tool: ToolFileChunkTool
-
-    file_id: str
-
-    type: Optional[ToolFileChunkType] = "tool_file"
-
-    file_name: OptionalNullable[str] = UNSET
-
-    file_type: OptionalNullable[str] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["type", "file_name", "file_type"]
-        nullable_fields = ["file_name", "file_type"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
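
The three `ToolExecution*Event` models deleted above describe the started/delta/done lifecycle of a built-in connector run inside a streamed conversation. A minimal sketch, not part of this patch, of dispatching on those event types; the `agent_id`, the `inputs` text, and the exact event-object shape are assumptions based on the models above.

```python
# Illustrative sketch, not part of this patch. Reuses `client` from the sketch
# above; the agent ID is a placeholder and the beta API may differ in detail.
stream = client.beta.conversations.start_stream(
    agent_id="ag_0123456789",  # placeholder
    inputs="Search the web for today's EUR/USD rate.",
)
for event in stream:
    data = event.data
    etype = getattr(data, "type", None)
    if etype == "tool.execution.started":
        print(f"tool {data.name} started (output index {data.output_index})")
    elif etype == "tool.execution.delta":
        print(f"tool {data.name} arguments delta: {data.arguments}")
    elif etype == "tool.execution.done":
        print(f"tool {data.name} finished: {data.info}")
```
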
diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py
deleted file mode 100644
index ef917c43..00000000
--- a/src/mistralai/models/toolmessage.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .contentchunk import ContentChunk, ContentChunkTypedDict
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import List, Literal, Optional, Union
-from typing_extensions import NotRequired, TypeAliasType, TypedDict
-
-
-ToolMessageContentTypedDict = TypeAliasType(
-    "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
-)
-
-
-ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]])
-
-
-ToolMessageRole = Literal["tool",]
-
-
-class ToolMessageTypedDict(TypedDict):
-    content: Nullable[ToolMessageContentTypedDict]
-    tool_call_id: NotRequired[Nullable[str]]
-    name: NotRequired[Nullable[str]]
-    role: NotRequired[ToolMessageRole]
-
-
-class ToolMessage(BaseModel):
-    content: Nullable[ToolMessageContent]
-
-    tool_call_id: OptionalNullable[str] = UNSET
-
-    name: OptionalNullable[str] = UNSET
-
-    role: Optional[ToolMessageRole] = "tool"
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["tool_call_id", "name", "role"]
-        nullable_fields = ["content", "tool_call_id", "name"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
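
For reference, a minimal sketch (not part of this patch) of where the deleted `ToolMessage` shape appears in practice: returning a tool result into a chat exchange. It continues the `client` sketch above; `assistant_turn` stands in for a prior assistant message that carried a tool call.

```python
# Illustrative sketch, not part of this patch. `assistant_turn` is assumed to
# be res.choices[0].message from an earlier tool-calling completion.
follow_up = client.chat.complete(
    model="mistral-small-latest",  # example model name
    messages=[
        {"role": "user", "content": "What's the weather in Paris?"},
        assistant_turn,  # the assistant message containing the tool call
        {
            "role": "tool",
            "name": "get_weather",
            "tool_call_id": assistant_turn.tool_calls[0].id,
            "content": '{"temperature_c": 18, "sky": "overcast"}',
        },
    ],
)
```
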
diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py
deleted file mode 100644
index 2a751cb0..00000000
--- a/src/mistralai/models/toolreferencechunk.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .builtinconnectors import BuiltInConnectors
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import Literal, Optional, Union
-from typing_extensions import NotRequired, TypeAliasType, TypedDict
-
-
-ToolReferenceChunkType = Literal["tool_reference",]
-
-
-ToolReferenceChunkToolTypedDict = TypeAliasType(
-    "ToolReferenceChunkToolTypedDict", Union[BuiltInConnectors, str]
-)
-
-
-ToolReferenceChunkTool = TypeAliasType(
-    "ToolReferenceChunkTool", Union[BuiltInConnectors, str]
-)
-
-
-class ToolReferenceChunkTypedDict(TypedDict):
-    tool: ToolReferenceChunkToolTypedDict
-    title: str
-    type: NotRequired[ToolReferenceChunkType]
-    url: NotRequired[Nullable[str]]
-    favicon: NotRequired[Nullable[str]]
-    description: NotRequired[Nullable[str]]
-
-
-class ToolReferenceChunk(BaseModel):
-    tool: ToolReferenceChunkTool
-
-    title: str
-
-    type: Optional[ToolReferenceChunkType] = "tool_reference"
-
-    url: OptionalNullable[str] = UNSET
-
-    favicon: OptionalNullable[str] = UNSET
-
-    description: OptionalNullable[str] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["type", "url", "favicon", "description"]
-        nullable_fields = ["url", "favicon", "description"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/tooltypes.py b/src/mistralai/models/tooltypes.py
deleted file mode 100644
index f54893c2..00000000
--- a/src/mistralai/models/tooltypes.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import UnrecognizedStr
-from typing import Literal, Union
-
-
-ToolTypes = Union[Literal["function",], UnrecognizedStr]
diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py
deleted file mode 100644
index 99bd49dd..00000000
--- a/src/mistralai/models/trainingfile.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from typing import Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-class TrainingFileTypedDict(TypedDict):
-    file_id: str
-    weight: NotRequired[float]
-
-
-class TrainingFile(BaseModel):
-    file_id: str
-
-    weight: Optional[float] = 1
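
The deleted `TrainingFile` model pairs a `file_id` with an optional sampling `weight` (default 1). A minimal sketch, not part of this patch, of where that pair fits when creating a fine-tuning job; the method signature, model name, file ID, and hyperparameters are assumptions.

```python
# Illustrative sketch, not part of this patch. IDs and hyperparameters are
# placeholders; the fine_tuning job API may differ in detail.
job = client.fine_tuning.jobs.create(
    model="open-mistral-7b",
    training_files=[{"file_id": "file_abc123", "weight": 1}],
    hyperparameters={"training_steps": 10, "learning_rate": 1e-4},
)
print(job.id, job.status)
```
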
diff --git a/src/mistralai/models/transcriptionresponse.py b/src/mistralai/models/transcriptionresponse.py
deleted file mode 100644
index 54a98a5b..00000000
--- a/src/mistralai/models/transcriptionresponse.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .transcriptionsegmentchunk import (
-    TranscriptionSegmentChunk,
-    TranscriptionSegmentChunkTypedDict,
-)
-from .usageinfo import UsageInfo, UsageInfoTypedDict
-from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL
-import pydantic
-from pydantic import ConfigDict, model_serializer
-from typing import Any, Dict, List, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-class TranscriptionResponseTypedDict(TypedDict):
-    model: str
-    text: str
-    usage: UsageInfoTypedDict
-    language: Nullable[str]
-    segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]]
-
-
-class TranscriptionResponse(BaseModel):
-    model_config = ConfigDict(
-        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
-    )
-    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
-
-    model: str
-
-    text: str
-
-    usage: UsageInfo
-
-    language: Nullable[str]
-
-    segments: Optional[List[TranscriptionSegmentChunk]] = None
-
-    @property
-    def additional_properties(self):
-        return self.__pydantic_extra__
-
-    @additional_properties.setter
-    def additional_properties(self, value):
-        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["segments"]
-        nullable_fields = ["language"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        for k, v in serialized.items():
-            m[k] = v
-
-        return m
diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py
deleted file mode 100644
index 40ad20b3..00000000
--- a/src/mistralai/models/transcriptionsegmentchunk.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-import pydantic
-from pydantic import ConfigDict, model_serializer
-from typing import Any, Dict, Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-Type = Literal["transcription_segment",]
-
-
-class TranscriptionSegmentChunkTypedDict(TypedDict):
-    text: str
-    start: float
-    end: float
-    score: NotRequired[Nullable[float]]
-    speaker_id: NotRequired[Nullable[str]]
-    type: NotRequired[Type]
-
-
-class TranscriptionSegmentChunk(BaseModel):
-    model_config = ConfigDict(
-        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
-    )
-    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
-
-    text: str
-
-    start: float
-
-    end: float
-
-    score: OptionalNullable[float] = UNSET
-
-    speaker_id: OptionalNullable[str] = UNSET
-
-    type: Optional[Type] = "transcription_segment"
-
-    @property
-    def additional_properties(self):
-        return self.__pydantic_extra__
-
-    @additional_properties.setter
-    def additional_properties(self, value):
-        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["score", "speaker_id", "type"]
-        nullable_fields = ["score", "speaker_id"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        for k, v in serialized.items():
-            m[k] = v
-
-        return m
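
A minimal sketch, not part of this patch, of reading the fields the deleted `TranscriptionResponse`/`TranscriptionSegmentChunk` models describe. The transcription model name and audio file path are placeholders.

```python
# Illustrative sketch, not part of this patch. Reuses `client` from above;
# the model name and file path are placeholders.
with open("meeting.mp3", "rb") as f:
    resp = client.audio.transcriptions.complete(
        model="voxtral-mini-latest",
        file={"file_name": "meeting.mp3", "content": f},
    )

print(resp.language, resp.usage)
for seg in resp.segments or []:
    # Each segment carries text plus start/end offsets in seconds, and may
    # include an optional score and speaker_id.
    print(f"[{seg.start:.1f}-{seg.end:.1f}] {seg.text}")
```
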
diff --git a/src/mistralai/models/transcriptionstreamdone.py b/src/mistralai/models/transcriptionstreamdone.py
deleted file mode 100644
index e1b1ab3d..00000000
--- a/src/mistralai/models/transcriptionstreamdone.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .transcriptionsegmentchunk import (
-    TranscriptionSegmentChunk,
-    TranscriptionSegmentChunkTypedDict,
-)
-from .usageinfo import UsageInfo, UsageInfoTypedDict
-from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL
-import pydantic
-from pydantic import ConfigDict, model_serializer
-from typing import Any, Dict, List, Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-TranscriptionStreamDoneType = Literal["transcription.done",]
-
-
-class TranscriptionStreamDoneTypedDict(TypedDict):
-    model: str
-    text: str
-    usage: UsageInfoTypedDict
-    language: Nullable[str]
-    segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]]
-    type: NotRequired[TranscriptionStreamDoneType]
-
-
-class TranscriptionStreamDone(BaseModel):
-    model_config = ConfigDict(
-        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
-    )
-    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
-
-    model: str
-
-    text: str
-
-    usage: UsageInfo
-
-    language: Nullable[str]
-
-    segments: Optional[List[TranscriptionSegmentChunk]] = None
-
-    type: Optional[TranscriptionStreamDoneType] = "transcription.done"
-
-    @property
-    def additional_properties(self):
-        return self.__pydantic_extra__
-
-    @additional_properties.setter
-    def additional_properties(self, value):
-        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["segments", "type"]
-        nullable_fields = ["language"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        for k, v in serialized.items():
-            m[k] = v
-
-        return m
diff --git a/src/mistralai/models/transcriptionstreamevents.py b/src/mistralai/models/transcriptionstreamevents.py
deleted file mode 100644
index 8207c03f..00000000
--- a/src/mistralai/models/transcriptionstreamevents.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .transcriptionstreamdone import (
-    TranscriptionStreamDone,
-    TranscriptionStreamDoneTypedDict,
-)
-from .transcriptionstreameventtypes import TranscriptionStreamEventTypes
-from .transcriptionstreamlanguage import (
-    TranscriptionStreamLanguage,
-    TranscriptionStreamLanguageTypedDict,
-)
-from .transcriptionstreamsegmentdelta import (
-    TranscriptionStreamSegmentDelta,
-    TranscriptionStreamSegmentDeltaTypedDict,
-)
-from .transcriptionstreamtextdelta import (
-    TranscriptionStreamTextDelta,
-    TranscriptionStreamTextDeltaTypedDict,
-)
-from mistralai.types import BaseModel
-from mistralai.utils import get_discriminator
-from pydantic import Discriminator, Tag
-from typing import Union
-from typing_extensions import Annotated, TypeAliasType, TypedDict
-
-
-TranscriptionStreamEventsDataTypedDict = TypeAliasType(
-    "TranscriptionStreamEventsDataTypedDict",
-    Union[
-        TranscriptionStreamTextDeltaTypedDict,
-        TranscriptionStreamLanguageTypedDict,
-        TranscriptionStreamSegmentDeltaTypedDict,
-        TranscriptionStreamDoneTypedDict,
-    ],
-)
-
-
-TranscriptionStreamEventsData = Annotated[
-    Union[
-        Annotated[TranscriptionStreamDone, Tag("transcription.done")],
-        Annotated[TranscriptionStreamLanguage, Tag("transcription.language")],
-        Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")],
-        Annotated[TranscriptionStreamTextDelta, Tag("transcription.text.delta")],
-    ],
-    Discriminator(lambda m: get_discriminator(m, "type", "type")),
-]
-
-
-class TranscriptionStreamEventsTypedDict(TypedDict):
-    event: TranscriptionStreamEventTypes
-    data: TranscriptionStreamEventsDataTypedDict
-
-
-class TranscriptionStreamEvents(BaseModel):
-    event: TranscriptionStreamEventTypes
-
-    data: TranscriptionStreamEventsData
diff --git a/src/mistralai/models/transcriptionstreameventtypes.py b/src/mistralai/models/transcriptionstreameventtypes.py
deleted file mode 100644
index 4a910f0a..00000000
--- a/src/mistralai/models/transcriptionstreameventtypes.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from typing import Literal
-
-
-TranscriptionStreamEventTypes = Literal[
-    "transcription.language",
-    "transcription.segment",
-    "transcription.text.delta",
-    "transcription.done",
-]
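
The `TranscriptionStreamEvents` union above wraps each streamed payload in an `event`/`data` pair discriminated by `type`. A minimal sketch, not part of this patch, of dispatching on the event names from `TranscriptionStreamEventTypes`; the streaming method name, model, and file path are assumptions.

```python
# Illustrative sketch, not part of this patch. Reuses `client` from above;
# paths and model names are placeholders, and the stream API may differ.
with open("meeting.mp3", "rb") as f:
    events = client.audio.transcriptions.stream(
        model="voxtral-mini-latest",
        file={"file_name": "meeting.mp3", "content": f},
    )
    for event in events:
        if event.event == "transcription.text.delta":
            print(event.data.text, end="", flush=True)
        elif event.event == "transcription.language":
            print(f"\n[detected language: {event.data.audio_language}]")
        elif event.event == "transcription.done":
            print(f"\n[usage: {event.data.usage}]")
```
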
diff --git a/src/mistralai/models/transcriptionstreamlanguage.py b/src/mistralai/models/transcriptionstreamlanguage.py
deleted file mode 100644
index 15b75144..00000000
--- a/src/mistralai/models/transcriptionstreamlanguage.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-import pydantic
-from pydantic import ConfigDict
-from typing import Any, Dict, Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-TranscriptionStreamLanguageType = Literal["transcription.language",]
-
-
-class TranscriptionStreamLanguageTypedDict(TypedDict):
-    audio_language: str
-    type: NotRequired[TranscriptionStreamLanguageType]
-
-
-class TranscriptionStreamLanguage(BaseModel):
-    model_config = ConfigDict(
-        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
-    )
-    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
-
-    audio_language: str
-
-    type: Optional[TranscriptionStreamLanguageType] = "transcription.language"
-
-    @property
-    def additional_properties(self):
-        return self.__pydantic_extra__
-
-    @additional_properties.setter
-    def additional_properties(self, value):
-        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py b/src/mistralai/models/transcriptionstreamsegmentdelta.py
deleted file mode 100644
index 550c83e7..00000000
--- a/src/mistralai/models/transcriptionstreamsegmentdelta.py
+++ /dev/null
@@ -1,77 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-import pydantic
-from pydantic import ConfigDict, model_serializer
-from typing import Any, Dict, Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-TranscriptionStreamSegmentDeltaType = Literal["transcription.segment",]
-
-
-class TranscriptionStreamSegmentDeltaTypedDict(TypedDict):
-    text: str
-    start: float
-    end: float
-    speaker_id: NotRequired[Nullable[str]]
-    type: NotRequired[TranscriptionStreamSegmentDeltaType]
-
-
-class TranscriptionStreamSegmentDelta(BaseModel):
-    model_config = ConfigDict(
-        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
-    )
-    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
-
-    text: str
-
-    start: float
-
-    end: float
-
-    speaker_id: OptionalNullable[str] = UNSET
-
-    type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment"
-
-    @property
-    def additional_properties(self):
-        return self.__pydantic_extra__
-
-    @additional_properties.setter
-    def additional_properties(self, value):
-        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["speaker_id", "type"]
-        nullable_fields = ["speaker_id"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        for k, v in serialized.items():
-            m[k] = v
-
-        return m
diff --git a/src/mistralai/models/transcriptionstreamtextdelta.py b/src/mistralai/models/transcriptionstreamtextdelta.py
deleted file mode 100644
index daee151f..00000000
--- a/src/mistralai/models/transcriptionstreamtextdelta.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-import pydantic
-from pydantic import ConfigDict
-from typing import Any, Dict, Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-TranscriptionStreamTextDeltaType = Literal["transcription.text.delta",]
-
-
-class TranscriptionStreamTextDeltaTypedDict(TypedDict):
-    text: str
-    type: NotRequired[TranscriptionStreamTextDeltaType]
-
-
-class TranscriptionStreamTextDelta(BaseModel):
-    model_config = ConfigDict(
-        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
-    )
-    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
-
-    text: str
-
-    type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta"
-
-    @property
-    def additional_properties(self):
-        return self.__pydantic_extra__
-
-    @additional_properties.setter
-    def additional_properties(self, value):
-        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py
deleted file mode 100644
index 55c0ea8a..00000000
--- a/src/mistralai/models/unarchiveftmodelout.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-UnarchiveFTModelOutObject = Literal["model",]
-
-
-class UnarchiveFTModelOutTypedDict(TypedDict):
-    id: str
-    object: NotRequired[UnarchiveFTModelOutObject]
-    archived: NotRequired[bool]
-
-
-class UnarchiveFTModelOut(BaseModel):
-    id: str
-
-    object: Optional[UnarchiveFTModelOutObject] = "model"
-
-    archived: Optional[bool] = False
diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py
deleted file mode 100644
index 1bd0eaf2..00000000
--- a/src/mistralai/models/updateftmodelin.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing_extensions import NotRequired, TypedDict
-
-
-class UpdateFTModelInTypedDict(TypedDict):
-    name: NotRequired[Nullable[str]]
-    description: NotRequired[Nullable[str]]
-
-
-class UpdateFTModelIn(BaseModel):
-    name: OptionalNullable[str] = UNSET
-
-    description: OptionalNullable[str] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["name", "description"]
-        nullable_fields = ["name", "description"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
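
`UpdateFTModelIn` is the request body that the `Models.update()` method further below serializes from its `name`/`description` keyword arguments. A minimal sketch, not part of this patch, of the corresponding call; the model ID and strings are placeholders.

```python
# Illustrative sketch, not part of this patch. The name/description pair is
# sent as UpdateFTModelIn by client.models.update(); the ID is a placeholder.
updated = client.models.update(
    model_id="ft:open-mistral-7b:abc123",
    name="support-triage-v2",
    description="Fine-tune for ticket triage",
)
```
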
diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py
deleted file mode 100644
index f235fdcd..00000000
--- a/src/mistralai/models/uploadfileout.py
+++ /dev/null
@@ -1,88 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .filepurpose import FilePurpose
-from .sampletype import SampleType
-from .source import Source
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-import pydantic
-from pydantic import model_serializer
-from typing_extensions import Annotated, NotRequired, TypedDict
-
-
-class UploadFileOutTypedDict(TypedDict):
-    id: str
-    r"""The unique identifier of the file."""
-    object: str
-    r"""The object type, which is always \"file\"."""
-    size_bytes: int
-    r"""The size of the file, in bytes."""
-    created_at: int
-    r"""The UNIX timestamp (in seconds) of the event."""
-    filename: str
-    r"""The name of the uploaded file."""
-    purpose: FilePurpose
-    sample_type: SampleType
-    source: Source
-    num_lines: NotRequired[Nullable[int]]
-    mimetype: NotRequired[Nullable[str]]
-    signature: NotRequired[Nullable[str]]
-
-
-class UploadFileOut(BaseModel):
-    id: str
-    r"""The unique identifier of the file."""
-
-    object: str
-    r"""The object type, which is always \"file\"."""
-
-    size_bytes: Annotated[int, pydantic.Field(alias="bytes")]
-    r"""The size of the file, in bytes."""
-
-    created_at: int
-    r"""The UNIX timestamp (in seconds) of the event."""
-
-    filename: str
-    r"""The name of the uploaded file."""
-
-    purpose: FilePurpose
-
-    sample_type: SampleType
-
-    source: Source
-
-    num_lines: OptionalNullable[int] = UNSET
-
-    mimetype: OptionalNullable[str] = UNSET
-
-    signature: OptionalNullable[str] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["num_lines", "mimetype", "signature"]
-        nullable_fields = ["num_lines", "mimetype", "signature"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py
deleted file mode 100644
index cedad5c1..00000000
--- a/src/mistralai/models/usageinfo.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-import pydantic
-from pydantic import ConfigDict, model_serializer
-from typing import Any, Dict, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-class UsageInfoTypedDict(TypedDict):
-    prompt_tokens: NotRequired[int]
-    completion_tokens: NotRequired[int]
-    total_tokens: NotRequired[int]
-    prompt_audio_seconds: NotRequired[Nullable[int]]
-
-
-class UsageInfo(BaseModel):
-    model_config = ConfigDict(
-        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
-    )
-    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)
-
-    prompt_tokens: Optional[int] = 0
-
-    completion_tokens: Optional[int] = 0
-
-    total_tokens: Optional[int] = 0
-
-    prompt_audio_seconds: OptionalNullable[int] = UNSET
-
-    @property
-    def additional_properties(self):
-        return self.__pydantic_extra__
-
-    @additional_properties.setter
-    def additional_properties(self, value):
-        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = [
-            "prompt_tokens",
-            "completion_tokens",
-            "total_tokens",
-            "prompt_audio_seconds",
-        ]
-        nullable_fields = ["prompt_audio_seconds"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        for k, v in serialized.items():
-            m[k] = v
-
-        return m
diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py
deleted file mode 100644
index 61590bed..00000000
--- a/src/mistralai/models/usermessage.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from .contentchunk import ContentChunk, ContentChunkTypedDict
-from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import List, Literal, Optional, Union
-from typing_extensions import NotRequired, TypeAliasType, TypedDict
-
-
-UserMessageContentTypedDict = TypeAliasType(
-    "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
-)
-
-
-UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]])
-
-
-UserMessageRole = Literal["user",]
-
-
-class UserMessageTypedDict(TypedDict):
-    content: Nullable[UserMessageContentTypedDict]
-    role: NotRequired[UserMessageRole]
-
-
-class UserMessage(BaseModel):
-    content: Nullable[UserMessageContent]
-
-    role: Optional[UserMessageRole] = "user"
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["role"]
-        nullable_fields = ["content"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py
deleted file mode 100644
index e971e016..00000000
--- a/src/mistralai/models/validationerror.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from typing import List, Union
-from typing_extensions import TypeAliasType, TypedDict
-
-
-LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int])
-
-
-Loc = TypeAliasType("Loc", Union[str, int])
-
-
-class ValidationErrorTypedDict(TypedDict):
-    loc: List[LocTypedDict]
-    msg: str
-    type: str
-
-
-class ValidationError(BaseModel):
-    loc: List[Loc]
-
-    msg: str
-
-    type: str
diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py
deleted file mode 100644
index 69053896..00000000
--- a/src/mistralai/models/wandbintegration.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-WandbIntegrationType = Literal["wandb",]
-
-
-class WandbIntegrationTypedDict(TypedDict):
-    project: str
-    r"""The name of the project that the new run will be created under."""
-    api_key: str
-    r"""The WandB API key to use for authentication."""
-    type: NotRequired[WandbIntegrationType]
-    name: NotRequired[Nullable[str]]
-    r"""A display name to set for the run. If not set, will use the job ID as the name."""
-    run_name: NotRequired[Nullable[str]]
-
-
-class WandbIntegration(BaseModel):
-    project: str
-    r"""The name of the project that the new run will be created under."""
-
-    api_key: str
-    r"""The WandB API key to use for authentication."""
-
-    type: Optional[WandbIntegrationType] = "wandb"
-
-    name: OptionalNullable[str] = UNSET
-    r"""A display name to set for the run. If not set, will use the job ID as the name."""
-
-    run_name: OptionalNullable[str] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["type", "name", "run_name"]
-        nullable_fields = ["name", "run_name"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py
deleted file mode 100644
index f5a9ba80..00000000
--- a/src/mistralai/models/wandbintegrationout.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
-from pydantic import model_serializer
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-WandbIntegrationOutType = Literal["wandb",]
-
-
-class WandbIntegrationOutTypedDict(TypedDict):
-    project: str
-    r"""The name of the project that the new run will be created under."""
-    type: NotRequired[WandbIntegrationOutType]
-    name: NotRequired[Nullable[str]]
-    r"""A display name to set for the run. If not set, will use the job ID as the name."""
-    run_name: NotRequired[Nullable[str]]
-    url: NotRequired[Nullable[str]]
-
-
-class WandbIntegrationOut(BaseModel):
-    project: str
-    r"""The name of the project that the new run will be created under."""
-
-    type: Optional[WandbIntegrationOutType] = "wandb"
-
-    name: OptionalNullable[str] = UNSET
-    r"""A display name to set for the run. If not set, will use the job ID as the name."""
-
-    run_name: OptionalNullable[str] = UNSET
-
-    url: OptionalNullable[str] = UNSET
-
-    @model_serializer(mode="wrap")
-    def serialize_model(self, handler):
-        optional_fields = ["type", "name", "run_name", "url"]
-        nullable_fields = ["name", "run_name", "url"]
-        null_default_fields = []
-
-        serialized = handler(self)
-
-        m = {}
-
-        for n, f in type(self).model_fields.items():
-            k = f.alias or n
-            val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-
-        return m
diff --git a/src/mistralai/models/websearchpremiumtool.py b/src/mistralai/models/websearchpremiumtool.py
deleted file mode 100644
index 3bbe753a..00000000
--- a/src/mistralai/models/websearchpremiumtool.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-WebSearchPremiumToolType = Literal["web_search_premium",]
-
-
-class WebSearchPremiumToolTypedDict(TypedDict):
-    type: NotRequired[WebSearchPremiumToolType]
-
-
-class WebSearchPremiumTool(BaseModel):
-    type: Optional[WebSearchPremiumToolType] = "web_search_premium"
diff --git a/src/mistralai/models/websearchtool.py b/src/mistralai/models/websearchtool.py
deleted file mode 100644
index eeafecb4..00000000
--- a/src/mistralai/models/websearchtool.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from __future__ import annotations
-from mistralai.types import BaseModel
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-WebSearchToolType = Literal["web_search",]
-
-
-class WebSearchToolTypedDict(TypedDict):
-    type: NotRequired[WebSearchToolType]
-
-
-class WebSearchTool(BaseModel):
-    type: Optional[WebSearchToolType] = "web_search"
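
The two connector models just deleted reduce to a single `type` discriminator. A minimal sketch, not part of this patch, of attaching them as agent tools; the agent name, model, and instructions are illustrative assumptions.

```python
# Illustrative sketch, not part of this patch. Reuses `client` from above;
# the tool payloads mirror WebSearchTool / WebSearchPremiumTool.
agent = client.beta.agents.create(
    model="mistral-medium-latest",  # example model name
    name="research-agent",
    instructions="Answer with cited web sources.",
    tools=[{"type": "web_search"}],  # or {"type": "web_search_premium"}
)
```
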
diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py
deleted file mode 100644
index d44930a0..00000000
--- a/src/mistralai/models_.py
+++ /dev/null
@@ -1,1063 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-
-from .basesdk import BaseSDK
-from mistralai import models, utils
-from mistralai._hooks import HookContext
-from mistralai.types import OptionalNullable, UNSET
-from mistralai.utils import get_security_from_env
-from mistralai.utils.unmarshal_json_response import unmarshal_json_response
-from typing import Any, Mapping, Optional
-
-
-class Models(BaseSDK):
-    r"""Model Management API"""
-
-    def list(
-        self,
-        *,
-        retries: OptionalNullable[utils.RetryConfig] = UNSET,
-        server_url: Optional[str] = None,
-        timeout_ms: Optional[int] = None,
-        http_headers: Optional[Mapping[str, str]] = None,
-    ) -> models.ModelList:
-        r"""List Models
-
-        List all models available to the user.
-
-        :param retries: Override the default retry configuration for this method
-        :param server_url: Override the default server URL for this method
-        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
-        :param http_headers: Additional headers to set or replace on requests.
-        """
-        base_url = None
-        url_variables = None
-        if timeout_ms is None:
-            timeout_ms = self.sdk_configuration.timeout_ms
-
-        if server_url is not None:
-            base_url = server_url
-        else:
-            base_url = self._get_url(base_url, url_variables)
-        req = self._build_request(
-            method="GET",
-            path="/v1/models",
-            base_url=base_url,
-            url_variables=url_variables,
-            request=None,
-            request_body_required=False,
-            request_has_path_params=False,
-            request_has_query_params=True,
-            user_agent_header="user-agent",
-            accept_header_value="application/json",
-            http_headers=http_headers,
-            security=self.sdk_configuration.security,
-            allow_empty_value=None,
-            timeout_ms=timeout_ms,
-        )
-
-        if retries == UNSET:
-            if self.sdk_configuration.retry_config is not UNSET:
-                retries = self.sdk_configuration.retry_config
-
-        retry_config = None
-        if isinstance(retries, utils.RetryConfig):
-            retry_config = (retries, ["429", "500", "502", "503", "504"])
-
-        http_res = self.do_request(
-            hook_ctx=HookContext(
-                config=self.sdk_configuration,
-                base_url=base_url or "",
-                operation_id="list_models_v1_models_get",
-                oauth2_scopes=None,
-                security_source=get_security_from_env(
-                    self.sdk_configuration.security, models.Security
-                ),
-            ),
-            request=req,
-            error_status_codes=["4XX", "5XX"],
-            retry_config=retry_config,
-        )
-
-        if utils.match_response(http_res, "200", "application/json"):
-            return unmarshal_json_response(models.ModelList, http_res)
-        if utils.match_response(http_res, "4XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-        if utils.match_response(http_res, "5XX", "*"):
-            http_res_text = utils.stream_to_text(http_res)
-            raise models.SDKError("API error occurred", http_res, http_res_text)
-
-        raise models.SDKError("Unexpected response received", http_res)
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - req = self._build_request_async( - method="GET", - path="/v1/models", - base_url=base_url, - url_variables=url_variables, - request=None, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="list_models_v1_models_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModelList, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def retrieve( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: - r"""Retrieve Model - - Retrieve information about a model. - - :param model_id: The ID of the model to retrieve. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.RetrieveModelV1ModelsModelIDGetRequest( - model_id=model_id, - ) - - req = self._build_request( - method="GET", - path="/v1/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - http_res, - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def retrieve_async( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: - r"""Retrieve Model - - Retrieve information about a model. - - :param model_id: The ID of the model to retrieve. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.RetrieveModelV1ModelsModelIDGetRequest( - model_id=model_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - http_res, - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteModelOut: - r"""Delete Model - - Delete a fine-tuned model. - - :param model_id: The ID of the model to delete. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.DeleteModelV1ModelsModelIDDeleteRequest( - model_id=model_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteModelOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteModelOut: - r"""Delete Model - - Delete a fine-tuned model. - - :param model_id: The ID of the model to delete. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.DeleteModelV1ModelsModelIDDeleteRequest( - model_id=model_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteModelOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update( - self, - *, - model_id: str, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: - r"""Update Fine Tuned Model - - Update a model name or description. - - :param model_id: The ID of the model to update. - :param name: - :param description: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( - model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( - name=name, - description=description, - ), - ) - - req = self._build_request( - method="PATCH", - path="/v1/fine_tuning/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_async( - self, - *, - model_id: str, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: - r"""Update Fine Tuned Model - - Update a model name or description. - - :param model_id: The ID of the model to update. - :param name: - :param description: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( - model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( - name=name, - description=description, - ), - ) - - req = self._build_request_async( - method="PATCH", - path="/v1/fine_tuning/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def archive( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: - r"""Archive Fine Tuned Model - - Archive a fine-tuned model. - - :param model_id: The ID of the model to archive. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( - model_id=model_id, - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/models/{model_id}/archive", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ArchiveFTModelOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def archive_async( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: - r"""Archive Fine Tuned Model - - Archive a fine-tuned model. - - :param model_id: The ID of the model to archive. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( - model_id=model_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/models/{model_id}/archive", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ArchiveFTModelOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def unarchive( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: - r"""Unarchive Fine Tuned Model - - Un-archive a fine-tuned model. - - :param model_id: The ID of the model to unarchive. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( - model_id=model_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/fine_tuning/models/{model_id}/archive", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def unarchive_async( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: - r"""Unarchive Fine Tuned Model - - Un-archive a fine-tuned model. - - :param model_id: The ID of the model to unarchive. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( - model_id=model_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/fine_tuning/models/{model_id}/archive", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py deleted file mode 100644 index ceb7dd85..00000000 --- a/src/mistralai/ocr.py +++ /dev/null @@ -1,303 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - ocrrequest as models_ocrrequest, - responseformat as models_responseformat, -) -from mistralai.types import Nullable, OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union - - -class Ocr(BaseSDK): - r"""OCR API""" - - def process( - self, - *, - model: Nullable[str], - document: Union[ - models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict - ], - id: Optional[str] = None, - pages: OptionalNullable[List[int]] = UNSET, - include_image_base64: OptionalNullable[bool] = UNSET, - image_limit: OptionalNullable[int] = UNSET, - image_min_size: OptionalNullable[int] = UNSET, - bbox_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = UNSET, - document_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = UNSET, - document_annotation_prompt: OptionalNullable[str] = UNSET, - table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, - extract_header: Optional[bool] = None, - extract_footer: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.OCRResponse: - r"""OCR - - :param model: - :param document: Document to run OCR on - :param id: - :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 - :param include_image_base64: Include image URLs in response - :param image_limit: Max images to extract - :param image_min_size: Minimum height and width of image to extract - :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field - :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field - :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. - :param table_format: - :param extract_header: - :param extract_footer: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.OCRRequest( - model=model, - id=id, - document=utils.get_pydantic_model(document, models.Document), - pages=pages, - include_image_base64=include_image_base64, - image_limit=image_limit, - image_min_size=image_min_size, - bbox_annotation_format=utils.get_pydantic_model( - bbox_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_format=utils.get_pydantic_model( - document_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_prompt=document_annotation_prompt, - table_format=table_format, - extract_header=extract_header, - extract_footer=extract_footer, - ) - - req = self._build_request( - method="POST", - path="/v1/ocr", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.OCRRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="ocr_v1_ocr_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.OCRResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def process_async( - self, - *, - model: Nullable[str], - document: Union[ - models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict - ], - id: Optional[str] = None, - pages: OptionalNullable[List[int]] = UNSET, - include_image_base64: OptionalNullable[bool] = UNSET, - image_limit: OptionalNullable[int] = UNSET, - image_min_size: OptionalNullable[int] = UNSET, - bbox_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = UNSET, - document_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = UNSET, - 
document_annotation_prompt: OptionalNullable[str] = UNSET, - table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, - extract_header: Optional[bool] = None, - extract_footer: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.OCRResponse: - r"""OCR - - :param model: - :param document: Document to run OCR on - :param id: - :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 - :param include_image_base64: Include image URLs in response - :param image_limit: Max images to extract - :param image_min_size: Minimum height and width of image to extract - :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field - :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field - :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. - :param table_format: - :param extract_header: - :param extract_footer: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.OCRRequest( - model=model, - id=id, - document=utils.get_pydantic_model(document, models.Document), - pages=pages, - include_image_base64=include_image_base64, - image_limit=image_limit, - image_min_size=image_min_size, - bbox_annotation_format=utils.get_pydantic_model( - bbox_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_format=utils.get_pydantic_model( - document_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_prompt=document_annotation_prompt, - table_format=table_format, - extract_header=extract_header, - extract_footer=extract_footer, - ) - - req = self._build_request_async( - method="POST", - path="/v1/ocr", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.OCRRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url 
or "", - operation_id="ocr_v1_ocr_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.OCRResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/py.typed b/src/mistralai/py.typed deleted file mode 100644 index 3e38f1a9..00000000 --- a/src/mistralai/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. The package enables type hints. diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py deleted file mode 100644 index c83b53e0..00000000 --- a/src/mistralai/sdk.py +++ /dev/null @@ -1,222 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients -from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger -from .utils.retries import RetryConfig -import httpx -import importlib -from mistralai import models, utils -from mistralai._hooks import SDKHooks -from mistralai.types import OptionalNullable, UNSET -import sys -from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast -import weakref - -if TYPE_CHECKING: - from mistralai.agents import Agents - from mistralai.audio import Audio - from mistralai.batch import Batch - from mistralai.beta import Beta - from mistralai.chat import Chat - from mistralai.classifiers import Classifiers - from mistralai.embeddings import Embeddings - from mistralai.files import Files - from mistralai.fim import Fim - from mistralai.fine_tuning import FineTuning - from mistralai.models_ import Models - from mistralai.ocr import Ocr - - -class Mistral(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - - models: "Models" - r"""Model Management API""" - beta: "Beta" - files: "Files" - r"""Files API""" - fine_tuning: "FineTuning" - batch: "Batch" - chat: "Chat" - r"""Chat Completion API.""" - fim: "Fim" - r"""Fill-in-the-middle API.""" - agents: "Agents" - r"""Agents API.""" - embeddings: "Embeddings" - r"""Embeddings API.""" - classifiers: "Classifiers" - r"""Classifiers API.""" - ocr: "Ocr" - r"""OCR API""" - audio: "Audio" - _sub_sdk_map = { - "models": ("mistralai.models_", "Models"), - "beta": ("mistralai.beta", "Beta"), - "files": ("mistralai.files", "Files"), - "fine_tuning": ("mistralai.fine_tuning", "FineTuning"), - "batch": ("mistralai.batch", "Batch"), - "chat": ("mistralai.chat", "Chat"), - "fim": ("mistralai.fim", "Fim"), - "agents": ("mistralai.agents", "Agents"), - "embeddings": ("mistralai.embeddings", "Embeddings"), - "classifiers": ("mistralai.classifiers", "Classifiers"), - "ocr": ("mistralai.ocr", "Ocr"), - "audio": ("mistralai.audio", "Audio"), - } - - def __init__( - self, - api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, - server: Optional[str] = None, - server_url: Optional[str] = None, - url_params: Optional[Dict[str, str]] = None, - client: Optional[HttpClient] = None, - async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None, - ) -> None: - r"""Instantiates the SDK configuring it with the provided parameters. - - :param api_key: The api_key required for authentication - :param server: The server by name to use for all methods - :param server_url: The server URL to use for all methods - :param url_params: Parameters to optionally template the server URL with - :param client: The HTTP client to use for all synchronous methods - :param async_client: The Async HTTP client to use for all asynchronous methods - :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds - """ - client_supplied = True - if client is None: - client = httpx.Client(follow_redirects=True) - client_supplied = False - - assert issubclass( - type(client), HttpClient - ), "The provided client must implement the HttpClient protocol." - - async_client_supplied = True - if async_client is None: - async_client = httpx.AsyncClient(follow_redirects=True) - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() - - assert issubclass( - type(async_client), AsyncHttpClient - ), "The provided async_client must implement the AsyncHttpClient protocol."
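`_sub_sdk_map` above drives the lazy loading implemented in `__getattr__` further down: the first access to a sub-SDK attribute imports its module, instantiates the class, and caches it with `setattr`, so later lookups bypass `__getattr__` entirely. A small sketch of the observable behavior:

```python
from mistralai import Mistral

client = Mistral(api_key="...")
chat = client.chat          # first access: imports mistralai.chat, caches the instance
assert chat is client.chat  # second access: plain attribute hit, no import machinery
```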
- - security: Any = None - if callable(api_key): - # pylint: disable=unnecessary-lambda-assignment - security = lambda: models.Security(api_key=api_key()) - else: - security = models.Security(api_key=api_key) - - if server_url is not None: - if url_params is not None: - server_url = utils.template_url(server_url, url_params) - - BaseSDK.__init__( - self, - SDKConfiguration( - client=client, - client_supplied=client_supplied, - async_client=async_client, - async_client_supplied=async_client_supplied, - security=security, - server_url=server_url, - server=server, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger, - ), - parent_ref=self, - ) - - hooks = SDKHooks() - - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - - current_server_url, *_ = self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client - ) - if current_server_url != server_url: - self.sdk_configuration.server_url = server_url - - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - - def dynamic_import(self, modname, retries=3): - for attempt in range(retries): - try: - return importlib.import_module(modname) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - def __getattr__(self, name: str): - if name in self._sub_sdk_map: - module_path, class_name = self._sub_sdk_map[name] - try: - module = self.dynamic_import(module_path) - klass = getattr(module, class_name) - instance = klass(self.sdk_configuration, parent_ref=self) - setattr(self, name, instance) - return instance - except ImportError as e: - raise AttributeError( - f"Failed to import module {module_path} for attribute {name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" - ) from e - - raise AttributeError( - f"'{type(self).__name__}' object has no attribute '{name}'" - ) - - def __dir__(self): - default_attrs = list(super().__dir__()) - lazy_attrs = list(self._sub_sdk_map.keys()) - return sorted(list(set(default_attrs + lazy_attrs))) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py deleted file mode 100644 index 7e77925d..00000000 --- a/src/mistralai/sdkconfiguration.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" - -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) -from .httpclient import AsyncHttpClient, HttpClient -from .utils import Logger, RetryConfig, remove_suffix -from dataclasses import dataclass -from mistralai import models -from mistralai.types import OptionalNullable, UNSET -from pydantic import Field -from typing import Callable, Dict, Optional, Tuple, Union - - -SERVER_EU = "eu" -r"""EU Production server""" -SERVERS = { - SERVER_EU: "https://api.mistral.ai", -} -"""Contains the list of servers available to the SDK""" - - -@dataclass -class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool - debug_logger: Logger - security: Optional[Union[models.Security, Callable[[], models.Security]]] = None - server_url: Optional[str] = "" - server: Optional[str] = "" - language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ - retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) - timeout_ms: Optional[int] = None - - def get_server_details(self) -> Tuple[str, Dict[str, str]]: - if self.server_url is not None and self.server_url: - return remove_suffix(self.server_url, "/"), {} - if not self.server: - self.server = SERVER_EU - - if self.server not in SERVERS: - raise ValueError(f'Invalid server "{self.server}"') - - return SERVERS[self.server], {} diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py deleted file mode 100644 index 90f2e58a..00000000 --- a/src/mistralai/transcriptions.py +++ /dev/null @@ -1,481 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - file as models_file, - timestampgranularity as models_timestampgranularity, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import List, Mapping, Optional, Union - - -class Transcriptions(BaseSDK): - r"""API for audio transcription.""" - - def complete( - self, - *, - model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, - file_url: OptionalNullable[str] = UNSET, - file_id: OptionalNullable[str] = UNSET, - language: OptionalNullable[str] = UNSET, - temperature: OptionalNullable[float] = UNSET, - diarize: Optional[bool] = False, - context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.TranscriptionResponse: - r"""Create Transcription - - :param model: ID of the model to be used. - :param file: - :param file_url: Url of a file to be transcribed - :param file_id: ID of a file uploaded to /v1/files - :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
- :param temperature: - :param diarize: - :param context_bias: - :param timestamp_granularities: Granularities of timestamps to include in the response. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AudioTranscriptionRequest( - model=model, - file=utils.get_pydantic_model(file, Optional[models.File]), - file_url=file_url, - file_id=file_id, - language=language, - temperature=temperature, - diarize=diarize, - context_bias=context_bias, - timestamp_granularities=timestamp_granularities, - ) - - req = self._build_request( - method="POST", - path="/v1/audio/transcriptions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "multipart", models.AudioTranscriptionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.TranscriptionResponse, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def complete_async( - self, - *, - model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, - file_url: OptionalNullable[str] = UNSET, - file_id: OptionalNullable[str] = UNSET, - language: OptionalNullable[str] = UNSET, - temperature: OptionalNullable[float] = UNSET, - diarize: Optional[bool] = False, - context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.TranscriptionResponse: - r"""Create Transcription - - :param model: 
ID of the model to be used. - :param file: - :param file_url: Url of a file to be transcribed - :param file_id: ID of a file uploaded to /v1/files - :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. - :param temperature: - :param diarize: - :param context_bias: - :param timestamp_granularities: Granularities of timestamps to include in the response. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AudioTranscriptionRequest( - model=model, - file=utils.get_pydantic_model(file, Optional[models.File]), - file_url=file_url, - file_id=file_id, - language=language, - temperature=temperature, - diarize=diarize, - context_bias=context_bias, - timestamp_granularities=timestamp_granularities, - ) - - req = self._build_request_async( - method="POST", - path="/v1/audio/transcriptions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "multipart", models.AudioTranscriptionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.TranscriptionResponse, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def stream( - self, - *, - model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, - file_url: OptionalNullable[str] = UNSET, - file_id: OptionalNullable[str] = UNSET, - language: OptionalNullable[str] = UNSET, - temperature: OptionalNullable[float] = UNSET, - diarize: Optional[bool] = False, - context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - 
] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: - r"""Create Streaming Transcription (SSE) - - :param model: - :param file: - :param file_url: Url of a file to be transcribed - :param file_id: ID of a file uploaded to /v1/files - :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. - :param temperature: - :param diarize: - :param context_bias: - :param timestamp_granularities: Granularities of timestamps to include in the response. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AudioTranscriptionRequestStream( - model=model, - file=utils.get_pydantic_model(file, Optional[models.File]), - file_url=file_url, - file_id=file_id, - language=language, - temperature=temperature, - diarize=diarize, - context_bias=context_bias, - timestamp_granularities=timestamp_granularities, - ) - - req = self._build_request( - method="POST", - path="/v1/audio/transcriptions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.AudioTranscriptionRequestStream, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), - client_ref=self, - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def stream_async( - 
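`stream` posts to the same route with an `accept: text/event-stream` header and returns an `EventStream` whose iterator yields `TranscriptionStreamEvents` as SSE messages arrive; using it as a context manager closes the connection when iteration ends. Sketch (same placeholder inputs as above):

```python
from mistralai import Mistral

with Mistral(api_key="...") as client:
    with client.audio.transcriptions.stream(
        model="voxtral-mini-latest",                 # assumed model name
        file_url="https://example.com/meeting.mp3",  # placeholder
    ) as event_stream:
        for event in event_stream:
            print(event)  # one TranscriptionStreamEvents per SSE message
```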
self, - *, - model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, - file_url: OptionalNullable[str] = UNSET, - file_id: OptionalNullable[str] = UNSET, - language: OptionalNullable[str] = UNSET, - temperature: OptionalNullable[float] = UNSET, - diarize: Optional[bool] = False, - context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: - r"""Create Streaming Transcription (SSE) - - :param model: - :param file: - :param file_url: Url of a file to be transcribed - :param file_id: ID of a file uploaded to /v1/files - :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. - :param temperature: - :param diarize: - :param context_bias: - :param timestamp_granularities: Granularities of timestamps to include in the response. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AudioTranscriptionRequestStream( - model=model, - file=utils.get_pydantic_model(file, Optional[models.File]), - file_url=file_url, - file_id=file_id, - language=language, - temperature=temperature, - diarize=diarize, - context_bias=context_bias, - timestamp_granularities=timestamp_granularities, - ) - - req = self._build_request_async( - method="POST", - path="/v1/audio/transcriptions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.AudioTranscriptionRequestStream, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), - client_ref=self, - ) - if 
utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/types/__init__.py b/src/mistralai/types/__init__.py deleted file mode 100644 index fc76fe0c..00000000 --- a/src/mistralai/types/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basemodel import ( - BaseModel, - Nullable, - OptionalNullable, - UnrecognizedInt, - UnrecognizedStr, - UNSET, - UNSET_SENTINEL, -) - -__all__ = [ - "BaseModel", - "Nullable", - "OptionalNullable", - "UnrecognizedInt", - "UnrecognizedStr", - "UNSET", - "UNSET_SENTINEL", -] diff --git a/src/mistralai/types/basemodel.py b/src/mistralai/types/basemodel.py deleted file mode 100644 index a9a640a1..00000000 --- a/src/mistralai/types/basemodel.py +++ /dev/null @@ -1,77 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from pydantic import ConfigDict, model_serializer -from pydantic import BaseModel as PydanticBaseModel -from pydantic_core import core_schema -from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union -from typing_extensions import TypeAliasType, TypeAlias - - -class BaseModel(PydanticBaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() - ) - - -class Unset(BaseModel): - @model_serializer(mode="plain") - def serialize_model(self): - return UNSET_SENTINEL - - def __bool__(self) -> Literal[False]: - return False - - -UNSET = Unset() - UNSET_SENTINEL = "~?~unset~?~sentinel~?~" - - -T = TypeVar("T") -if TYPE_CHECKING: - Nullable: TypeAlias = Union[T, None] - OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] -else: - Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) - OptionalNullable = TypeAliasType( - "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) - ) - - -class UnrecognizedStr(str): - @classmethod - def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: - # Make UnrecognizedStr only work in lax mode, not strict mode - # This makes it a "fallback" option when more specific types (like Literals) don't match - def validate_lax(v: Any) -> 'UnrecognizedStr': - if isinstance(v, cls): - return v - return cls(str(v)) - - # Use lax_or_strict_schema where strict always fails - # This forces Pydantic to prefer other union members in strict mode - # and only fall back to UnrecognizedStr in lax mode - return core_schema.lax_or_strict_schema( - lax_schema=core_schema.chain_schema([ - core_schema.str_schema(), - core_schema.no_info_plain_validator_function(validate_lax) - ]), - strict_schema=core_schema.none_schema(), # Always fails in strict mode - ) - - -class UnrecognizedInt(int): - @classmethod - def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: - # Make UnrecognizedInt only work in lax mode, not strict mode - # This makes it a "fallback" option when more specific types (like
Literals) don't match - def validate_lax(v: Any) -> 'UnrecognizedInt': - if isinstance(v, cls): - return v - return cls(int(v)) - return core_schema.lax_or_strict_schema( - lax_schema=core_schema.chain_schema([ - core_schema.int_schema(), - core_schema.no_info_plain_validator_function(validate_lax) - ]), - strict_schema=core_schema.none_schema(), # Always fails in strict mode - ) diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py deleted file mode 100644 index f9c2edce..00000000 --- a/src/mistralai/utils/__init__.py +++ /dev/null @@ -1,197 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys - -if TYPE_CHECKING: - from .annotations import get_discriminator - from .datetimes import parse_datetime - from .enums import OpenEnumMeta - from .headers import get_headers, get_response_headers - from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, - ) - from .queryparams import get_query_params - from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig - from .requestbodies import serialize_request_body, SerializedRequestBody - from .security import get_security, get_security_from_env - - from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - ) - from .url import generate_url, template_url, remove_suffix - from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, - ) - from .logger import Logger, get_body_content, get_default_logger - -__all__ = [ - "BackoffStrategy", - "FieldMetadata", - "find_metadata", - "FormMetadata", - "generate_url", - "get_body_content", - "get_default_logger", - "get_discriminator", - "parse_datetime", - "get_global_from_env", - "get_headers", - "get_pydantic_model", - "get_query_params", - "get_response_headers", - "get_security", - "get_security_from_env", - "HeaderMetadata", - "Logger", - "marshal_json", - "match_content_type", - "match_status_codes", - "match_response", - "MultipartFormMetadata", - "OpenEnumMeta", - "PathParamMetadata", - "QueryParamMetadata", - "remove_suffix", - "Retries", - "retry", - "retry_async", - "RetryConfig", - "RequestMetadata", - "SecurityMetadata", - "serialize_decimal", - "serialize_float", - "serialize_int", - "serialize_request_body", - "SerializedRequestBody", - "stream_to_text", - "stream_to_text_async", - "stream_to_bytes", - "stream_to_bytes_async", - "template_url", - "unmarshal", - "unmarshal_json", - "validate_decimal", - "validate_const", - "validate_float", - "validate_int", - "cast_partial", -] - -_dynamic_imports: dict[str, str] = { - "BackoffStrategy": ".retries", - "FieldMetadata": ".metadata", - "find_metadata": ".metadata", - "FormMetadata": ".metadata", - "generate_url": ".url", - "get_body_content": ".logger", - "get_default_logger": ".logger", - "get_discriminator": ".annotations", - "parse_datetime": ".datetimes", - "get_global_from_env": ".values", - "get_headers": ".headers", - "get_pydantic_model": ".serializers", - "get_query_params": 
".queryparams", - "get_response_headers": ".headers", - "get_security": ".security", - "get_security_from_env": ".security", - "HeaderMetadata": ".metadata", - "Logger": ".logger", - "marshal_json": ".serializers", - "match_content_type": ".values", - "match_status_codes": ".values", - "match_response": ".values", - "MultipartFormMetadata": ".metadata", - "OpenEnumMeta": ".enums", - "PathParamMetadata": ".metadata", - "QueryParamMetadata": ".metadata", - "remove_suffix": ".url", - "Retries": ".retries", - "retry": ".retries", - "retry_async": ".retries", - "RetryConfig": ".retries", - "RequestMetadata": ".metadata", - "SecurityMetadata": ".metadata", - "serialize_decimal": ".serializers", - "serialize_float": ".serializers", - "serialize_int": ".serializers", - "serialize_request_body": ".requestbodies", - "SerializedRequestBody": ".requestbodies", - "stream_to_text": ".serializers", - "stream_to_text_async": ".serializers", - "stream_to_bytes": ".serializers", - "stream_to_bytes_async": ".serializers", - "template_url": ".url", - "unmarshal": ".serializers", - "unmarshal_json": ".serializers", - "validate_decimal": ".serializers", - "validate_const": ".serializers", - "validate_float": ".serializers", - "validate_int": ".serializers", - "cast_partial": ".values", -} - - -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - return getattr(module, attr_name) - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e - - -def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py deleted file mode 100644 index 12e0aa4f..00000000 --- a/src/mistralai/utils/annotations.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from enum import Enum -from typing import Any, Optional - - -def get_discriminator(model: Any, fieldname: str, key: str) -> str: - """ - Recursively search for the discriminator attribute in a model. - - Args: - model (Any): The model to search within. - fieldname (str): The name of the field to search for. - key (str): The key to search for in dictionaries. - - Returns: - str: The name of the discriminator attribute. - - Raises: - ValueError: If the discriminator attribute is not found. 
- """ - upper_fieldname = fieldname.upper() - - def get_field_discriminator(field: Any) -> Optional[str]: - """Search for the discriminator attribute in a given field.""" - - if isinstance(field, dict): - if key in field: - return f"{field[key]}" - - if hasattr(field, fieldname): - attr = getattr(field, fieldname) - if isinstance(attr, Enum): - return f"{attr.value}" - return f"{attr}" - - if hasattr(field, upper_fieldname): - attr = getattr(field, upper_fieldname) - if isinstance(attr, Enum): - return f"{attr.value}" - return f"{attr}" - - return None - - def search_nested_discriminator(obj: Any) -> Optional[str]: - """Recursively search for discriminator in nested structures.""" - # First try direct field lookup - discriminator = get_field_discriminator(obj) - if discriminator is not None: - return discriminator - - # If it's a dict, search in nested values - if isinstance(obj, dict): - for value in obj.values(): - if isinstance(value, list): - # Search in list items - for item in value: - nested_discriminator = search_nested_discriminator(item) - if nested_discriminator is not None: - return nested_discriminator - elif isinstance(value, dict): - # Search in nested dict - nested_discriminator = search_nested_discriminator(value) - if nested_discriminator is not None: - return nested_discriminator - - return None - - if isinstance(model, list): - for field in model: - discriminator = search_nested_discriminator(field) - if discriminator is not None: - return discriminator - - discriminator = search_nested_discriminator(model) - if discriminator is not None: - return discriminator - - raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/src/mistralai/utils/datetimes.py b/src/mistralai/utils/datetimes.py deleted file mode 100644 index a6c52cd6..00000000 --- a/src/mistralai/utils/datetimes.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from datetime import datetime -import sys - - -def parse_datetime(datetime_string: str) -> datetime: - """ - Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. - Python versions 3.11 and later support parsing RFC 3339 directly with - datetime.fromisoformat(), but for earlier versions, this function - encapsulates the necessary extra logic. - """ - # Python 3.11 and later can parse RFC 3339 directly - if sys.version_info >= (3, 11): - return datetime.fromisoformat(datetime_string) - - # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, - # so fix that upfront. - if datetime_string.endswith("Z"): - datetime_string = datetime_string[:-1] + "+00:00" - - return datetime.fromisoformat(datetime_string) diff --git a/src/mistralai/utils/enums.py b/src/mistralai/utils/enums.py deleted file mode 100644 index 3324e1bc..00000000 --- a/src/mistralai/utils/enums.py +++ /dev/null @@ -1,134 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import enum -import sys -from typing import Any - -from pydantic_core import core_schema - - -class OpenEnumMeta(enum.EnumMeta): - # The __call__ method `boundary` kwarg was added in 3.11 and must be present - # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 - # pylint: disable=unexpected-keyword-arg - # The __call__ method `values` varg must be named for pyright. 
- # pylint: disable=keyword-arg-before-vararg - - if sys.version_info >= (3, 11): - def __call__( - cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin - - if names is not None: - return super().__call__( - value, - names=names, - *values, - module=module, - qualname=qualname, - type=type, - start=start, - boundary=boundary, - ) - - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - *values, - module=module, - qualname=qualname, - type=type, - start=start, - boundary=boundary, - ) - except ValueError: - return value - else: - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin - - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) - - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value - - def __new__(mcs, name, bases, namespace, **kwargs): - cls = super().__new__(mcs, name, bases, namespace, **kwargs) - - # Add __get_pydantic_core_schema__ to make open enums work correctly - # in union discrimination. In strict mode (used by Pydantic for unions), - # only known enum values match. In lax mode, unknown values are accepted. - def __get_pydantic_core_schema__( - cls_inner: Any, _source_type: Any, _handler: Any - ) -> core_schema.CoreSchema: - # Create a validator that only accepts known enum values (for strict mode) - def validate_strict(v: Any) -> Any: - if isinstance(v, cls_inner): - return v - # Use the parent EnumMeta's __call__ which raises ValueError for unknown values - return enum.EnumMeta.__call__(cls_inner, v) - - # Create a lax validator that accepts unknown values - def validate_lax(v: Any) -> Any: - if isinstance(v, cls_inner): - return v - try: - return enum.EnumMeta.__call__(cls_inner, v) - except ValueError: - # Return the raw value for unknown enum values - return v - - # Determine the base type schema (str or int) - is_int_enum = False - for base in cls_inner.__mro__: - if base is int: - is_int_enum = True - break - if base is str: - break - - base_schema = ( - core_schema.int_schema() - if is_int_enum - else core_schema.str_schema() - ) - - # Use lax_or_strict_schema: - # - strict mode: only known enum values match (raises ValueError for unknown) - # - lax mode: accept any value, return enum member or raw value - return core_schema.lax_or_strict_schema( - lax_schema=core_schema.chain_schema( - [base_schema, core_schema.no_info_plain_validator_function(validate_lax)] - ), - strict_schema=core_schema.chain_schema( - [base_schema, core_schema.no_info_plain_validator_function(validate_strict)] - ), - ) - - setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) - return cls diff --git a/src/mistralai/utils/eventstreaming.py b/src/mistralai/utils/eventstreaming.py deleted file mode 100644 index 0969899b..00000000 --- a/src/mistralai/utils/eventstreaming.py +++ /dev/null @@ -1,248 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -import re -import json -from typing import ( - Callable, - Generic, - TypeVar, - Optional, - Generator, - AsyncGenerator, - Tuple, -) -import httpx - -T = TypeVar("T") - - -class EventStream(Generic[T]): - # Holds a reference to the SDK client to avoid it being garbage collected - # and cause termination of the underlying httpx client. - client_ref: Optional[object] - response: httpx.Response - generator: Generator[T, None, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - client_ref: Optional[object] = None, - ): - self.response = response - self.generator = stream_events(response, decoder, sentinel) - self.client_ref = client_ref - - def __iter__(self): - return self - - def __next__(self): - return next(self.generator) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.response.close() - - -class EventStreamAsync(Generic[T]): - # Holds a reference to the SDK client to avoid it being garbage collected - # and cause termination of the underlying httpx client. - client_ref: Optional[object] - response: httpx.Response - generator: AsyncGenerator[T, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - client_ref: Optional[object] = None, - ): - self.response = response - self.generator = stream_events_async(response, decoder, sentinel) - self.client_ref = client_ref - - def __aiter__(self): - return self - - async def __anext__(self): - return await self.generator.__anext__() - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - await self.response.aclose() - - -class ServerEvent: - id: Optional[str] = None - event: Optional[str] = None - data: Optional[str] = None - retry: Optional[int] = None - - -MESSAGE_BOUNDARIES = [ - b"\r\n\r\n", - b"\n\n", - b"\r\r", -] - - -async def stream_events_async( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> AsyncGenerator[T, None]: - buffer = bytearray() - position = 0 - discard = False - async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. - if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def stream_events( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> Generator[T, None, None]: - buffer = bytearray() - position = 0 - discard = False - for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
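
The event-streaming module removed here scans the byte buffer for the `\r\n\r\n`, `\n\n`, or `\r\r` boundaries between SSE messages, then parses each block field by field. A simplified, self-contained sketch of that per-block parsing; the real `_parse_event` additionally handles a sentinel value, `retry` hints, and primitive JSON payloads:

```python
import json
from typing import Any, Dict, Optional


def parse_sse_block(block: str) -> Optional[Dict[str, Any]]:
    # Simplified sketch of the deleted _parse_event: read "field: value" lines
    # from one SSE message, accumulate multi-line data, and JSON-decode the
    # data payload when it looks like JSON.
    event: Dict[str, Any] = {"id": None, "event": None, "data": None, "retry": None}
    data_lines = []
    publish = False
    for line in block.splitlines():
        delim = line.find(":")
        if delim <= 0:  # blank lines and ":comment" lines are ignored
            continue
        field = line[:delim]
        value = line[delim + 1 :]
        if value.startswith(" "):  # the spec allows one optional leading space
            value = value[1:]
        if field == "data":
            data_lines.append(value)
            publish = True
        elif field in ("id", "event"):
            event[field] = value
            publish = True
        elif field == "retry":
            event["retry"] = int(value) if value.isdigit() else None
            publish = True
    if data_lines:
        data = "\n".join(data_lines)
        event["data"] = data
        if data[:1] in '{["':
            try:
                event["data"] = json.loads(data)
            except ValueError:
                pass
    return event if publish else None


print(parse_sse_block('event: message.delta\ndata: {"text": "hello"}'))
```
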
- if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: - block = raw.decode() - lines = re.split(r"\r?\n|\r", block) - publish = False - event = ServerEvent() - data = "" - for line in lines: - if not line: - continue - - delim = line.find(":") - if delim <= 0: - continue - - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] - - if field == "event": - event.event = value - publish = True - elif field == "data": - data += value + "\n" - publish = True - elif field == "id": - event.id = value - publish = True - elif field == "retry": - event.retry = int(value) if value.isdigit() else None - publish = True - - if sentinel and data == f"{sentinel}\n": - return None, True - - if data: - data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass - - out = None - if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False - - -def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): - if len(sequence) > (len(buffer) - position): - return None - - for i, seq in enumerate(sequence): - if buffer[position + i] != seq: - return None - - return sequence diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py deleted file mode 100644 index f961e76b..00000000 --- a/src/mistralai/utils/forms.py +++ /dev/null @@ -1,234 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .serializers import marshal_json - -from .metadata import ( - FormMetadata, - MultipartFormMetadata, - find_field_metadata, -) -from .values import _is_set, _val_to_string - - -def _populate_form( - field_name: str, - explode: bool, - obj: Any, - delimiter: str, - form: Dict[str, List[str]], -): - if not _is_set(obj): - return form - - if isinstance(obj, BaseModel): - items = [] - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - obj_field_name = obj_field.alias if obj_field.alias is not None else name - if obj_field_name == "": - continue - - val = getattr(obj, name) - if not _is_set(val): - continue - - if explode: - form[obj_field_name] = [_val_to_string(val)] - else: - items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, Dict): - items = [] - for key, value in obj.items(): - if not _is_set(value): - continue - - if explode: - form[key] = [_val_to_string(value)] - else: - items.append(f"{key}{delimiter}{_val_to_string(value)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, List): - items = [] - - for value in obj: - if not _is_set(value): - continue - - if explode: - if not field_name in form: - form[field_name] = [] - form[field_name].append(_val_to_string(value)) - else: - items.append(_val_to_string(value)) - - if len(items) > 0: - form[field_name] = [delimiter.join([str(item) for item in items])] - else: - form[field_name] = [_val_to_string(obj)] - - return form - - -def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: - """Extract file name, content, and content type from a file object.""" - file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] - - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue - - if file_metadata.content: - content = getattr(file_obj, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(file_obj, file_field_name, None) - else: - file_name = getattr(file_obj, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - return file_name, content, content_type - - -def serialize_multipart_form( - media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: - form: Dict[str, Any] = {} - files: List[Tuple[str, Any]] = [] - - if not isinstance(request, BaseModel): - raise TypeError("invalid request body type") - - request_fields: Dict[str, FieldInfo] = request.__class__.model_fields - request_field_types = get_type_hints(request.__class__) - - for name in request_fields: - field = request_fields[name] - - val = getattr(request, name) - if not _is_set(val): - continue - - field_metadata = find_field_metadata(field, MultipartFormMetadata) - if not field_metadata: - continue - - f_name = field.alias if field.alias else name - - if field_metadata.file: - if isinstance(val, List): - # Handle array of files - array_field_name = f_name + "[]" - for file_obj in val: - if not _is_set(file_obj): - continue - - file_name, content, content_type 
= _extract_file_properties( - file_obj - ) - - if content_type is not None: - files.append( - (array_field_name, (file_name, content, content_type)) - ) - else: - files.append((array_field_name, (file_name, content))) - else: - # Handle single file - file_name, content, content_type = _extract_file_properties(val) - - if content_type is not None: - files.append((f_name, (file_name, content, content_type))) - else: - files.append((f_name, (file_name, content))) - elif field_metadata.json: - files.append( - ( - f_name, - ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ), - ) - ) - else: - if isinstance(val, List): - values = [] - - for value in val: - if not _is_set(value): - continue - values.append(_val_to_string(value)) - - array_field_name = f_name + "[]" - form[array_field_name] = values - else: - form[f_name] = _val_to_string(val) - return media_type, form, files - - -def serialize_form_data(data: Any) -> Dict[str, Any]: - form: Dict[str, List[str]] = {} - - if isinstance(data, BaseModel): - data_fields: Dict[str, FieldInfo] = data.__class__.model_fields - data_field_types = get_type_hints(data.__class__) - for name in data_fields: - field = data_fields[name] - - val = getattr(data, name) - if not _is_set(val): - continue - - metadata = find_field_metadata(field, FormMetadata) - if metadata is None: - continue - - f_name = field.alias if field.alias is not None else name - - if metadata.json: - form[f_name] = [marshal_json(val, data_field_types[name])] - else: - if metadata.style == "form": - _populate_form( - f_name, - metadata.explode, - val, - ",", - form, - ) - else: - raise ValueError(f"Invalid form style for field {name}") - elif isinstance(data, Dict): - for key, value in data.items(): - if _is_set(value): - form[key] = [_val_to_string(value)] - else: - raise TypeError(f"Invalid request body type {type(data)} for form data") - - return form diff --git a/src/mistralai/utils/headers.py b/src/mistralai/utils/headers.py deleted file mode 100644 index 37864cbb..00000000 --- a/src/mistralai/utils/headers.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
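
`serialize_multipart_form`, deleted above, reduces a request model to the `(field_name, (filename, content, content_type))` tuples that httpx expects for multipart bodies, using `field[]` naming for file arrays and JSON-encoded sub-parts for nested models. A small sketch of what those tuples look like when handed to httpx; the host, field values, and model name are hypothetical:

```python
import httpx

# Hypothetical upload mirroring the tuple shape the deleted
# serialize_multipart_form produced for a single file plus plain form fields.
files = [("file", ("audio.mp3", b"fake-audio-bytes", "audio/mpeg"))]
data = {"model": "example-model"}  # hypothetical form field

req = httpx.Request(
    "POST",
    "https://example.invalid/v1/audio/transcriptions",
    data=data,
    files=files,
)
print(req.headers["content-type"])  # multipart/form-data; boundary=...
```
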
DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - List, - Optional, -) -from httpx import Headers -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - HeaderMetadata, - find_field_metadata, -) - -from .values import _is_set, _populate_from_globals, _val_to_string - - -def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: - headers: Dict[str, str] = {} - - globals_already_populated = [] - if _is_set(headers_params): - globals_already_populated = _populate_headers(headers_params, gbls, headers, []) - if _is_set(gbls): - _populate_headers(gbls, None, headers, globals_already_populated) - - return headers - - -def _populate_headers( - headers_params: Any, - gbls: Any, - header_values: Dict[str, str], - skip_fields: List[str], -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(headers_params, BaseModel): - return globals_already_populated - - param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields - for name in param_fields: - if name in skip_fields: - continue - - field = param_fields[name] - f_name = field.alias if field.alias is not None else name - - metadata = find_field_metadata(field, HeaderMetadata) - if metadata is None: - continue - - value, global_found = _populate_from_globals( - name, getattr(headers_params, name), HeaderMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - value = _serialize_header(metadata.explode, value) - - if value != "": - header_values[f_name] = value - - return globals_already_populated - - -def _serialize_header(explode: bool, obj: Any) -> str: - if not _is_set(obj): - return "" - - if isinstance(obj, BaseModel): - items = [] - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) - - if not obj_param_metadata: - continue - - f_name = obj_field.alias if obj_field.alias is not None else name - - val = getattr(obj, name) - if not _is_set(val): - continue - - if explode: - items.append(f"{f_name}={_val_to_string(val)}") - else: - items.append(f_name) - items.append(_val_to_string(val)) - - if len(items) > 0: - return ",".join(items) - elif isinstance(obj, Dict): - items = [] - - for key, value in obj.items(): - if not _is_set(value): - continue - - if explode: - items.append(f"{key}={_val_to_string(value)}") - else: - items.append(key) - items.append(_val_to_string(value)) - - if len(items) > 0: - return ",".join([str(item) for item in items]) - elif isinstance(obj, List): - items = [] - - for value in obj: - if not _is_set(value): - continue - - items.append(_val_to_string(value)) - - if len(items) > 0: - return ",".join(items) - elif _is_set(obj): - return f"{_val_to_string(obj)}" - - return "" - - -def get_response_headers(headers: Headers) -> Dict[str, List[str]]: - res: Dict[str, List[str]] = {} - for k, v in headers.items(): - if not k in res: - res[k] = [] - - res[k].append(v) - return res diff --git a/src/mistralai/utils/logger.py b/src/mistralai/utils/logger.py deleted file mode 100644 index cc089307..00000000 --- a/src/mistralai/utils/logger.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import httpx -import logging -import os -from typing import Any, Protocol - - -class Logger(Protocol): - def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: - pass - - -class NoOpLogger: - def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: - pass - - -def get_body_content(req: httpx.Request) -> str: - return "" if not hasattr(req, "_content") else str(req.content) - - -def get_default_logger() -> Logger: - if os.getenv("MISTRAL_DEBUG"): - logging.basicConfig(level=logging.DEBUG) - return logging.getLogger("mistralai") - return NoOpLogger() diff --git a/src/mistralai/utils/metadata.py b/src/mistralai/utils/metadata.py deleted file mode 100644 index 173b3e5c..00000000 --- a/src/mistralai/utils/metadata.py +++ /dev/null @@ -1,118 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import Optional, Type, TypeVar, Union -from dataclasses import dataclass -from pydantic.fields import FieldInfo - - -T = TypeVar("T") - - -@dataclass -class SecurityMetadata: - option: bool = False - scheme: bool = False - scheme_type: Optional[str] = None - sub_type: Optional[str] = None - field_name: Optional[str] = None - - def get_field_name(self, default: str) -> str: - return self.field_name or default - - -@dataclass -class ParamMetadata: - serialization: Optional[str] = None - style: str = "simple" - explode: bool = False - - -@dataclass -class PathParamMetadata(ParamMetadata): - pass - - -@dataclass -class QueryParamMetadata(ParamMetadata): - style: str = "form" - explode: bool = True - - -@dataclass -class HeaderMetadata(ParamMetadata): - pass - - -@dataclass -class RequestMetadata: - media_type: str = "application/octet-stream" - - -@dataclass -class MultipartFormMetadata: - file: bool = False - content: bool = False - json: bool = False - - -@dataclass -class FormMetadata: - json: bool = False - style: str = "form" - explode: bool = True - - -class FieldMetadata: - security: Optional[SecurityMetadata] = None - path: Optional[PathParamMetadata] = None - query: Optional[QueryParamMetadata] = None - header: Optional[HeaderMetadata] = None - request: Optional[RequestMetadata] = None - form: Optional[FormMetadata] = None - multipart: Optional[MultipartFormMetadata] = None - - def __init__( - self, - security: Optional[SecurityMetadata] = None, - path: Optional[Union[PathParamMetadata, bool]] = None, - query: Optional[Union[QueryParamMetadata, bool]] = None, - header: Optional[Union[HeaderMetadata, bool]] = None, - request: Optional[Union[RequestMetadata, bool]] = None, - form: Optional[Union[FormMetadata, bool]] = None, - multipart: Optional[Union[MultipartFormMetadata, bool]] = None, - ): - self.security = security - self.path = PathParamMetadata() if isinstance(path, bool) else path - self.query = QueryParamMetadata() if isinstance(query, bool) else query - self.header = HeaderMetadata() if isinstance(header, bool) else header - self.request = RequestMetadata() if isinstance(request, bool) else request - self.form = FormMetadata() if isinstance(form, bool) else form - self.multipart = ( - MultipartFormMetadata() if isinstance(multipart, bool) else multipart - ) - - -def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: - metadata = find_metadata(field_info, FieldMetadata) - if not metadata: - return None - - fields = metadata.__dict__ - - for field in fields: - if isinstance(fields[field], metadata_type): - return fields[field] - - return None - - 
-def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: - metadata = field_info.metadata - if not metadata: - return None - - for md in metadata: - if isinstance(md, metadata_type): - return md - - return None diff --git a/src/mistralai/utils/queryparams.py b/src/mistralai/utils/queryparams.py deleted file mode 100644 index c04e0db8..00000000 --- a/src/mistralai/utils/queryparams.py +++ /dev/null @@ -1,217 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Optional, -) - -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - QueryParamMetadata, - find_field_metadata, -) -from .values import ( - _get_serialized_params, - _is_set, - _populate_from_globals, - _val_to_string, -) -from .forms import _populate_form - - -def get_query_params( - query_params: Any, - gbls: Optional[Any] = None, - allow_empty_value: Optional[List[str]] = None, -) -> Dict[str, List[str]]: - params: Dict[str, List[str]] = {} - - globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) - if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) - - return params - - -def _populate_query_params( - query_params: Any, - gbls: Any, - query_param_values: Dict[str, List[str]], - skip_fields: List[str], - allow_empty_value: Optional[List[str]] = None, -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(query_params, BaseModel): - return globals_already_populated - - param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields - param_field_types = get_type_hints(query_params.__class__) - for name in param_fields: - if name in skip_fields: - continue - - field = param_fields[name] - - metadata = find_field_metadata(field, QueryParamMetadata) - if not metadata: - continue - - value = getattr(query_params, name) if _is_set(query_params) else None - - value, global_found = _populate_from_globals( - name, value, QueryParamMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - - f_name = field.alias if field.alias is not None else name - - allow_empty_set = set(allow_empty_value or []) - should_include_empty = f_name in allow_empty_set and ( - value is None or value == [] or value == "" - ) - - if should_include_empty: - query_param_values[f_name] = [""] - continue - - serialization = metadata.serialization - if serialization is not None: - serialized_parms = _get_serialized_params( - metadata, f_name, value, param_field_types[name] - ) - for key, value in serialized_parms.items(): - if key in query_param_values: - query_param_values[key].extend(value) - else: - query_param_values[key] = [value] - else: - style = metadata.style - if style == "deepObject": - _populate_deep_object_query_params(f_name, value, query_param_values) - elif style == "form": - _populate_delimited_query_params( - metadata, f_name, value, ",", query_param_values - ) - elif style == "pipeDelimited": - _populate_delimited_query_params( - metadata, f_name, value, "|", query_param_values - ) - else: - raise NotImplementedError( - f"query param style {style} not yet supported" - ) - - return globals_already_populated - - -def _populate_deep_object_query_params( - field_name: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj): - return - - if isinstance(obj, BaseModel): - 
_populate_deep_object_query_params_basemodel(field_name, obj, params) - elif isinstance(obj, Dict): - _populate_deep_object_query_params_dict(field_name, obj, params) - - -def _populate_deep_object_query_params_basemodel( - prior_params_key: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj) or not isinstance(obj, BaseModel): - return - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - - f_name = obj_field.alias if obj_field.alias is not None else name - - params_key = f"{prior_params_key}[{f_name}]" - - obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) - if not _is_set(obj_param_metadata): - continue - - obj_val = getattr(obj, name) - if not _is_set(obj_val): - continue - - if isinstance(obj_val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, obj_val, params) - elif isinstance(obj_val, Dict): - _populate_deep_object_query_params_dict(params_key, obj_val, params) - elif isinstance(obj_val, List): - _populate_deep_object_query_params_list(params_key, obj_val, params) - else: - params[params_key] = [_val_to_string(obj_val)] - - -def _populate_deep_object_query_params_dict( - prior_params_key: str, - value: Dict, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for key, val in value.items(): - if not _is_set(val): - continue - - params_key = f"{prior_params_key}[{key}]" - - if isinstance(val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, val, params) - elif isinstance(val, Dict): - _populate_deep_object_query_params_dict(params_key, val, params) - elif isinstance(val, List): - _populate_deep_object_query_params_list(params_key, val, params) - else: - params[params_key] = [_val_to_string(val)] - - -def _populate_deep_object_query_params_list( - params_key: str, - value: List, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for val in value: - if not _is_set(val): - continue - - if params.get(params_key) is None: - params[params_key] = [] - - params[params_key].append(_val_to_string(val)) - - -def _populate_delimited_query_params( - metadata: QueryParamMetadata, - field_name: str, - obj: Any, - delimiter: str, - query_param_values: Dict[str, List[str]], -): - _populate_form( - field_name, - metadata.explode, - obj, - delimiter, - query_param_values, - ) diff --git a/src/mistralai/utils/requestbodies.py b/src/mistralai/utils/requestbodies.py deleted file mode 100644 index 1de32b6d..00000000 --- a/src/mistralai/utils/requestbodies.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import io -from dataclasses import dataclass -import re -from typing import ( - Any, - Optional, -) - -from .forms import serialize_form_data, serialize_multipart_form - -from .serializers import marshal_json - -SERIALIZATION_METHOD_TO_CONTENT_TYPE = { - "json": "application/json", - "form": "application/x-www-form-urlencoded", - "multipart": "multipart/form-data", - "raw": "application/octet-stream", - "string": "text/plain", -} - - -@dataclass -class SerializedRequestBody: - media_type: Optional[str] = None - content: Optional[Any] = None - data: Optional[Any] = None - files: Optional[Any] = None - - -def serialize_request_body( - request_body: Any, - nullable: bool, - optional: bool, - serialization_method: str, - request_body_type, -) -> Optional[SerializedRequestBody]: - if request_body is None: - if not nullable and optional: - return None - - media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] - - serialized_request_body = SerializedRequestBody(media_type) - - if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: - serialized_request_body.content = marshal_json(request_body, request_body_type) - elif re.match(r"^multipart\/.*", media_type) is not None: - ( - serialized_request_body.media_type, - serialized_request_body.data, - serialized_request_body.files, - ) = serialize_multipart_form(media_type, request_body) - elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: - serialized_request_body.data = serialize_form_data(request_body) - elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): - serialized_request_body.content = request_body - elif isinstance(request_body, str): - serialized_request_body.content = request_body - else: - raise TypeError( - f"invalid request body type {type(request_body)} for mediaType {media_type}" - ) - - return serialized_request_body diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py deleted file mode 100644 index 88a91b10..00000000 --- a/src/mistralai/utils/retries.py +++ /dev/null @@ -1,281 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import asyncio -import random -import time -from datetime import datetime -from email.utils import parsedate_to_datetime -from typing import List, Optional - -import httpx - - -class BackoffStrategy: - initial_interval: int - max_interval: int - exponent: float - max_elapsed_time: int - - def __init__( - self, - initial_interval: int, - max_interval: int, - exponent: float, - max_elapsed_time: int, - ): - self.initial_interval = initial_interval - self.max_interval = max_interval - self.exponent = exponent - self.max_elapsed_time = max_elapsed_time - - -class RetryConfig: - strategy: str - backoff: BackoffStrategy - retry_connection_errors: bool - - def __init__( - self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool - ): - self.strategy = strategy - self.backoff = backoff - self.retry_connection_errors = retry_connection_errors - - -class Retries: - config: RetryConfig - status_codes: List[str] - - def __init__(self, config: RetryConfig, status_codes: List[str]): - self.config = config - self.status_codes = status_codes - - -class TemporaryError(Exception): - response: httpx.Response - retry_after: Optional[int] - - def __init__(self, response: httpx.Response): - self.response = response - self.retry_after = _parse_retry_after_header(response) - - -class PermanentError(Exception): - inner: Exception - - def __init__(self, inner: Exception): - self.inner = inner - - -def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: - """Parse Retry-After header from response. - - Returns: - Retry interval in milliseconds, or None if header is missing or invalid. - """ - retry_after_header = response.headers.get("retry-after") - if not retry_after_header: - return None - - try: - seconds = float(retry_after_header) - return round(seconds * 1000) - except ValueError: - pass - - try: - retry_date = parsedate_to_datetime(retry_after_header) - delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() - return round(max(0, delta) * 1000) - except (ValueError, TypeError): - pass - - return None - - -def _get_sleep_interval( - exception: Exception, - initial_interval: int, - max_interval: int, - exponent: float, - retries: int, -) -> float: - """Get sleep interval for retry with exponential backoff. - - Args: - exception: The exception that triggered the retry. - initial_interval: Initial retry interval in milliseconds. - max_interval: Maximum retry interval in milliseconds. - exponent: Base for exponential backoff calculation. - retries: Current retry attempt count. - - Returns: - Sleep interval in seconds. 
- """ - if ( - isinstance(exception, TemporaryError) - and exception.retry_after is not None - and exception.retry_after > 0 - ): - return exception.retry_after / 1000 - - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - return min(sleep, max_interval / 1000) - - -def retry(func, retries: Retries): - if retries.config.strategy == "backoff": - - def do_request() -> httpx.Response: - res: httpx.Response - try: - res = func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return retry_with_backoff( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return func() - - -async def retry_async(func, retries: Retries): - if retries.config.strategy == "backoff": - - async def do_request() -> httpx.Response: - res: httpx.Response - try: - res = await func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return await retry_with_backoff_async( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return await func() - - -def retry_with_backoff( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - - sleep = _get_sleep_interval( - exception, initial_interval, max_interval, exponent, retries - ) - time.sleep(sleep) - retries += 1 - - -async def retry_with_backoff_async( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return await func() - except PermanentError as exception: - raise exception.inner - except 
Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - - sleep = _get_sleep_interval( - exception, initial_interval, max_interval, exponent, retries - ) - await asyncio.sleep(sleep) - retries += 1 diff --git a/src/mistralai/utils/security.py b/src/mistralai/utils/security.py deleted file mode 100644 index 3b8526bf..00000000 --- a/src/mistralai/utils/security.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import base64 - -from typing import ( - Any, - Dict, - List, - Optional, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - SecurityMetadata, - find_field_metadata, -) -import os - - -def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: - headers: Dict[str, str] = {} - query_params: Dict[str, List[str]] = {} - - if security is None: - return headers, query_params - - if not isinstance(security, BaseModel): - raise TypeError("security must be a pydantic model") - - sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields - for name in sec_fields: - sec_field = sec_fields[name] - - value = getattr(security, name) - if value is None: - continue - - metadata = find_field_metadata(sec_field, SecurityMetadata) - if metadata is None: - continue - if metadata.option: - _parse_security_option(headers, query_params, value) - return headers, query_params - if metadata.scheme: - # Special case for basic auth or custom auth which could be a flattened model - if metadata.sub_type in ["basic", "custom"] and not isinstance( - value, BaseModel - ): - _parse_security_scheme(headers, query_params, metadata, name, security) - else: - _parse_security_scheme(headers, query_params, metadata, name, value) - - return headers, query_params - - -def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: - if security is not None: - return security - - if not issubclass(security_class, BaseModel): - raise TypeError("security_class must be a pydantic model class") - - security_dict: Any = {} - - if os.getenv("MISTRAL_API_KEY"): - security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") - - return security_class(**security_dict) if security_dict else None - - -def _parse_security_option( - headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any -): - if not isinstance(option, BaseModel): - raise TypeError("security option must be a pydantic model") - - opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields - for name in opt_fields: - opt_field = opt_fields[name] - - metadata = find_field_metadata(opt_field, SecurityMetadata) - if metadata is None or not metadata.scheme: - continue - _parse_security_scheme( - headers, query_params, metadata, name, getattr(option, name) - ) - - -def _parse_security_scheme( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: SecurityMetadata, - field_name: str, - scheme: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - if isinstance(scheme, BaseModel): - if scheme_type == "http": - if sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return - if sub_type == "custom": - return - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - 
scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - value = getattr(scheme, name) - - _parse_security_scheme_value( - headers, query_params, scheme_metadata, metadata, name, value - ) - else: - _parse_security_scheme_value( - headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme - ) - - -def _parse_security_scheme_value( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: SecurityMetadata, - security_metadata: SecurityMetadata, - field_name: str, - value: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - header_name = security_metadata.get_field_name(field_name) - - if scheme_type == "apiKey": - if sub_type == "header": - headers[header_name] = value - elif sub_type == "query": - query_params[header_name] = [value] - else: - raise ValueError("sub type {sub_type} not supported") - elif scheme_type == "openIdConnect": - headers[header_name] = _apply_bearer(value) - elif scheme_type == "oauth2": - if sub_type != "client_credentials": - headers[header_name] = _apply_bearer(value) - elif scheme_type == "http": - if sub_type == "bearer": - headers[header_name] = _apply_bearer(value) - elif sub_type == "custom": - return - else: - raise ValueError("sub type {sub_type} not supported") - else: - raise ValueError("scheme type {scheme_type} not supported") - - -def _apply_bearer(token: str) -> str: - return token.lower().startswith("bearer ") and token or f"Bearer {token}" - - -def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): - username = "" - password = "" - - if not isinstance(scheme, BaseModel): - raise TypeError("basic auth scheme must be a pydantic model") - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - field_name = metadata.field_name - value = getattr(scheme, name) - - if field_name == "username": - username = value - if field_name == "password": - password = value - - data = f"{username}:{password}".encode() - headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py deleted file mode 100644 index 14321eb4..00000000 --- a/src/mistralai/utils/serializers.py +++ /dev/null @@ -1,229 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
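
The security helpers deleted above reduce every scheme to request headers or query parameters; the two header constructions are small enough to show standalone (the credential values below are examples only):

```python
import base64


def apply_bearer(token: str) -> str:
    # Equivalent to the deleted _apply_bearer, written as a plain conditional:
    # keep an existing "Bearer " prefix, otherwise add one.
    return token if token.lower().startswith("bearer ") else f"Bearer {token}"


def basic_auth_header(username: str, password: str) -> str:
    # Same encoding the deleted _parse_basic_auth_scheme produced.
    credentials = f"{username}:{password}".encode()
    return f"Basic {base64.b64encode(credentials).decode()}"


print(apply_bearer("my-api-key"))           # Bearer my-api-key
print(basic_auth_header("user", "secret"))  # Basic dXNlcjpzZWNyZXQ=
```
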
DO NOT EDIT.""" - -from decimal import Decimal -import functools -import json -import typing -from typing import Any, Dict, List, Tuple, Union, get_args -import typing_extensions -from typing_extensions import get_origin - -import httpx -from pydantic import ConfigDict, create_model -from pydantic_core import from_json - -from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset - - -def serialize_decimal(as_str: bool): - def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: - return None - if isinstance(d, Unset): - return d - - if not isinstance(d, Decimal): - raise ValueError("Expected Decimal object") - - return str(d) if as_str else float(d) - - return serialize - - -def validate_decimal(d): - if d is None: - return None - - if isinstance(d, (Decimal, Unset)): - return d - - if not isinstance(d, (str, int, float)): - raise ValueError("Expected string, int or float") - - return Decimal(str(d)) - - -def serialize_float(as_str: bool): - def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: - return None - if isinstance(f, Unset): - return f - - if not isinstance(f, float): - raise ValueError("Expected float") - - return str(f) if as_str else f - - return serialize - - -def validate_float(f): - if f is None: - return None - - if isinstance(f, (float, Unset)): - return f - - if not isinstance(f, str): - raise ValueError("Expected string") - - return float(f) - - -def serialize_int(as_str: bool): - def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: - return None - if isinstance(i, Unset): - return i - - if not isinstance(i, int): - raise ValueError("Expected int") - - return str(i) if as_str else i - - return serialize - - -def validate_int(b): - if b is None: - return None - - if isinstance(b, (int, Unset)): - return b - - if not isinstance(b, str): - raise ValueError("Expected string") - - return int(b) - - -def validate_const(v): - def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: - return None - - if v != c: - raise ValueError(f"Expected {v}") - - return c - - return validate - - -def unmarshal_json(raw, typ: Any) -> Any: - return unmarshal(from_json(raw), typ) - - -def unmarshal(val, typ: Any) -> Any: - unmarshaller = create_model( - "Unmarshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = unmarshaller(body=val) - - # pyright: ignore[reportAttributeAccessIssue] - return m.body # type: ignore - - -def marshal_json(val, typ): - if is_nullable(typ) and val is None: - return "null" - - marshaller = create_model( - "Marshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = marshaller(body=val) - - d = m.model_dump(by_alias=True, mode="json", exclude_none=True) - - if len(d) == 0: - return "" - - return json.dumps(d[next(iter(d))], separators=(",", ":")) - - -def is_nullable(field): - origin = get_origin(field) - if origin is Nullable or origin is OptionalNullable: - return True - - if not origin is Union or type(None) not in get_args(field): - return False - - for arg in get_args(field): - if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: - return True - - return False - - -def is_union(obj: object) -> bool: - """ - Returns 
True if the given object is a typing.Union or typing_extensions.Union. - """ - return any( - obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") - ) - - -def stream_to_text(stream: httpx.Response) -> str: - return "".join(stream.iter_text()) - - -async def stream_to_text_async(stream: httpx.Response) -> str: - return "".join([chunk async for chunk in stream.aiter_text()]) - - -def stream_to_bytes(stream: httpx.Response) -> bytes: - return stream.content - - -async def stream_to_bytes_async(stream: httpx.Response) -> bytes: - return await stream.aread() - - -def get_pydantic_model(data: Any, typ: Any) -> Any: - if not _contains_pydantic_model(data): - return unmarshal(data, typ) - - return data - - -def _contains_pydantic_model(data: Any) -> bool: - if isinstance(data, BaseModel): - return True - if isinstance(data, List): - return any(_contains_pydantic_model(item) for item in data) - if isinstance(data, Dict): - return any(_contains_pydantic_model(value) for value in data.values()) - - return False - - -@functools.cache -def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: - """ - Get typing objects by name from typing and typing_extensions. - Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types - """ - result = tuple( - getattr(module, name) - for module in (typing, typing_extensions) - if hasattr(module, name) - ) - if not result: - raise ValueError( - f"Neither typing nor typing_extensions has an object called {name!r}" - ) - return result diff --git a/src/mistralai/utils/unmarshal_json_response.py b/src/mistralai/utils/unmarshal_json_response.py deleted file mode 100644 index 64d0b3a6..00000000 --- a/src/mistralai/utils/unmarshal_json_response.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import Any, Optional, Type, TypeVar, overload - -import httpx - -from .serializers import unmarshal_json -from mistralai import models - -T = TypeVar("T") - - -@overload -def unmarshal_json_response( - typ: Type[T], http_res: httpx.Response, body: Optional[str] = None -) -> T: ... - - -@overload -def unmarshal_json_response( - typ: Any, http_res: httpx.Response, body: Optional[str] = None -) -> Any: ... - - -def unmarshal_json_response( - typ: Any, http_res: httpx.Response, body: Optional[str] = None -) -> Any: - if body is None: - body = http_res.text - try: - return unmarshal_json(body, typ) - except Exception as e: - raise models.ResponseValidationError( - "Response validation failed", - http_res, - e, - body, - ) from e diff --git a/src/mistralai/utils/url.py b/src/mistralai/utils/url.py deleted file mode 100644 index c78ccbae..00000000 --- a/src/mistralai/utils/url.py +++ /dev/null @@ -1,155 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from decimal import Decimal -from typing import ( - Any, - Dict, - get_type_hints, - List, - Optional, - Union, - get_args, - get_origin, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - PathParamMetadata, - find_field_metadata, -) -from .values import ( - _get_serialized_params, - _is_set, - _populate_from_globals, - _val_to_string, -) - - -def generate_url( - server_url: str, - path: str, - path_params: Any, - gbls: Optional[Any] = None, -) -> str: - path_param_values: Dict[str, str] = {} - - globals_already_populated = _populate_path_params( - path_params, gbls, path_param_values, [] - ) - if _is_set(gbls): - _populate_path_params(gbls, None, path_param_values, globals_already_populated) - - for key, value in path_param_values.items(): - path = path.replace("{" + key + "}", value, 1) - - return remove_suffix(server_url, "/") + path - - -def _populate_path_params( - path_params: Any, - gbls: Any, - path_param_values: Dict[str, str], - skip_fields: List[str], -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(path_params, BaseModel): - return globals_already_populated - - path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields - path_param_field_types = get_type_hints(path_params.__class__) - for name in path_param_fields: - if name in skip_fields: - continue - - field = path_param_fields[name] - - param_metadata = find_field_metadata(field, PathParamMetadata) - if param_metadata is None: - continue - - param = getattr(path_params, name) if _is_set(path_params) else None - param, global_found = _populate_from_globals( - name, param, PathParamMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - - if not _is_set(param): - continue - - f_name = field.alias if field.alias is not None else name - serialization = param_metadata.serialization - if serialization is not None: - serialized_params = _get_serialized_params( - param_metadata, f_name, param, path_param_field_types[name] - ) - for key, value in serialized_params.items(): - path_param_values[key] = value - else: - pp_vals: List[str] = [] - if param_metadata.style == "simple": - if isinstance(param, List): - for pp_val in param: - if not _is_set(pp_val): - continue - pp_vals.append(_val_to_string(pp_val)) - path_param_values[f_name] = ",".join(pp_vals) - elif isinstance(param, Dict): - for pp_key in param: - if not _is_set(param[pp_key]): - continue - if param_metadata.explode: - pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") - else: - pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") - path_param_values[f_name] = ",".join(pp_vals) - elif not isinstance(param, (str, int, float, complex, bool, Decimal)): - param_fields: Dict[str, FieldInfo] = param.__class__.model_fields - for name in param_fields: - param_field = param_fields[name] - - param_value_metadata = find_field_metadata( - param_field, PathParamMetadata - ) - if param_value_metadata is None: - continue - - param_name = ( - param_field.alias if param_field.alias is not None else name - ) - - param_field_val = getattr(param, name) - if not _is_set(param_field_val): - continue - if param_metadata.explode: - pp_vals.append( - f"{param_name}={_val_to_string(param_field_val)}" - ) - else: - pp_vals.append( - f"{param_name},{_val_to_string(param_field_val)}" - ) - path_param_values[f_name] = ",".join(pp_vals) - elif _is_set(param): - path_param_values[f_name] = _val_to_string(param) - - return 
globals_already_populated - - -def is_optional(field): - return get_origin(field) is Union and type(None) in get_args(field) - - -def template_url(url_with_params: str, params: Dict[str, str]) -> str: - for key, value in params.items(): - url_with_params = url_with_params.replace("{" + key + "}", value) - - return url_with_params - - -def remove_suffix(input_string, suffix): - if suffix and input_string.endswith(suffix): - return input_string[: -len(suffix)] - return input_string diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py deleted file mode 100644 index dae01a44..00000000 --- a/src/mistralai/utils/values.py +++ /dev/null @@ -1,137 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from datetime import datetime -from enum import Enum -from email.message import Message -from functools import partial -import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast - -from httpx import Response -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from ..types.basemodel import Unset - -from .serializers import marshal_json - -from .metadata import ParamMetadata, find_field_metadata - - -def match_content_type(content_type: str, pattern: str) -> bool: - if pattern in (content_type, "*", "*/*"): - return True - - msg = Message() - msg["content-type"] = content_type - media_type = msg.get_content_type() - - if media_type == pattern: - return True - - parts = media_type.split("/") - if len(parts) == 2: - if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): - return True - - return False - - -def match_status_codes(status_codes: List[str], status_code: int) -> bool: - if "default" in status_codes: - return True - - for code in status_codes: - if code == str(status_code): - return True - - if code.endswith("XX") and code.startswith(str(status_code)[:1]): - return True - return False - - -T = TypeVar("T") - -def cast_partial(typ): - return partial(cast, typ) - -def get_global_from_env( - value: Optional[T], env_key: str, type_cast: Callable[[str], T] -) -> Optional[T]: - if value is not None: - return value - env_value = os.getenv(env_key) - if env_value is not None: - try: - return type_cast(env_value) - except ValueError: - pass - return None - - -def match_response( - response: Response, code: Union[str, List[str]], content_type: str -) -> bool: - codes = code if isinstance(code, list) else [code] - return match_status_codes(codes, response.status_code) and match_content_type( - response.headers.get("content-type", "application/octet-stream"), content_type - ) - - -def _populate_from_globals( - param_name: str, value: Any, param_metadata_type: type, gbls: Any -) -> Tuple[Any, bool]: - if gbls is None: - return value, False - - if not isinstance(gbls, BaseModel): - raise TypeError("globals must be a pydantic model") - - global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields - found = False - for name in global_fields: - field = global_fields[name] - if name is not param_name: - continue - - found = True - - if value is not None: - return value, True - - global_value = getattr(gbls, name) - - param_metadata = find_field_metadata(field, param_metadata_type) - if param_metadata is None: - return value, True - - return global_value, True - - return value, found - - -def _val_to_string(val) -> str: - if isinstance(val, bool): - return str(val).lower() - if isinstance(val, datetime): - return str(val.isoformat().replace("+00:00", "Z")) - if 
isinstance(val, Enum): - return str(val.value) - - return str(val) - - -def _get_serialized_params( - metadata: ParamMetadata, field_name: str, obj: Any, typ: type -) -> Dict[str, str]: - params: Dict[str, str] = {} - - serialization = metadata.serialization - if serialization == "json": - params[field_name] = marshal_json(obj, typ) - - return params - - -def _is_set(value: Any) -> bool: - return value is not None and not isinstance(value, Unset) From ea79059477079c012e0e3a00cc7c0250bf72d914 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:23:58 +0100 Subject: [PATCH 07/42] chore: update pyproject.toml for v2.0.0a1 and namespace packages - Update version to 2.0.0a1 - Update py.typed paths for new client/ location - Add mypy namespace_packages and explicit_package_bases settings --- pyproject.toml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 2cb90876..c9003a1e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.12.0" +version = "2.0.0a1" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -63,7 +63,7 @@ default-groups = [ ] [tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai/py.typed"] +"*" = ["py.typed", "src/mistralai/client/py.typed"] [tool.hatch.build.targets.sdist] include = [ @@ -74,7 +74,7 @@ include = [ [tool.hatch.build.targets.sdist.force-include] "py.typed" = "py.typed" -"src/mistralai/py.typed" = "src/mistralai/py.typed" +"src/mistralai/client/py.typed" = "src/mistralai/client/py.typed" [tool.hatch.build.targets.wheel] include = [ @@ -97,6 +97,9 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" From 1b84d96935a3bbec597cde11b46599ca42e26205 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:25:12 +0100 Subject: [PATCH 08/42] feat: regenerate SDK under mistralai.client namespace Generated by Speakeasy with moduleName=mistralai.client. All SDK code now lives under src/mistralai/client/. 
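Reviewer note: because the regenerated package is rooted at src/mistralai/client/,
downstream imports gain one path segment. A minimal before/after sketch, assuming
the public `Mistral` entry point keeps its current name and constructor (not
re-verified against the regenerated sdk.py in this patch):

    # v1.x layout: the SDK was generated directly under the top-level
    # "mistralai" module.
    from mistralai import Mistral

    # v2.0.0a1 layout: the SDK is regenerated under the "mistralai.client"
    # namespace package, so the import path gains one segment.
    from mistralai.client import Mistral

    # Construction is assumed unchanged by this move; only the module
    # path differs.
    client = Mistral(api_key="YOUR_API_KEY")

This pairs with the mypy `namespace_packages` / `explicit_package_bases`
settings added in the previous commit, which let mypy resolve src/mistralai/
as an implicit (PEP 420) namespace package instead of requiring an
__init__.py at every level.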
--- .speakeasy/gen.lock | 2391 +++++++-------- .speakeasy/workflow.lock | 2 +- README.md | 64 +- USAGE.md | 16 +- docs/sdks/accesses/README.md | 6 +- docs/sdks/agents/README.md | 4 +- docs/sdks/chat/README.md | 4 +- docs/sdks/classifiers/README.md | 8 +- docs/sdks/conversations/README.md | 22 +- docs/sdks/documents/README.md | 20 +- docs/sdks/embeddings/README.md | 2 +- docs/sdks/files/README.md | 12 +- docs/sdks/fim/README.md | 4 +- docs/sdks/jobs/README.md | 10 +- docs/sdks/libraries/README.md | 10 +- docs/sdks/mistralagents/README.md | 20 +- docs/sdks/mistraljobs/README.md | 8 +- docs/sdks/models/README.md | 12 +- docs/sdks/ocr/README.md | 2 +- docs/sdks/transcriptions/README.md | 4 +- src/mistralai/client/__init__.py | 18 + src/mistralai/client/_hooks/__init__.py | 5 + src/mistralai/client/_hooks/registration.py | 13 + src/mistralai/client/_hooks/sdkhooks.py | 76 + src/mistralai/client/_hooks/types.py | 113 + src/mistralai/client/_version.py | 15 + src/mistralai/client/accesses.py | 619 ++++ src/mistralai/client/agents.py | 725 +++++ src/mistralai/client/audio.py | 23 + src/mistralai/client/basesdk.py | 370 +++ src/mistralai/client/batch.py | 20 + src/mistralai/client/beta.py | 31 + src/mistralai/client/chat.py | 753 +++++ src/mistralai/client/classifiers.py | 800 +++++ src/mistralai/client/conversations.py | 2657 +++++++++++++++++ src/mistralai/client/documents.py | 1981 ++++++++++++ src/mistralai/client/embeddings.py | 240 ++ src/mistralai/client/files.py | 1120 +++++++ src/mistralai/client/fim.py | 545 ++++ src/mistralai/client/fine_tuning.py | 20 + src/mistralai/client/httpclient.py | 125 + src/mistralai/client/jobs.py | 1067 +++++++ src/mistralai/client/libraries.py | 946 ++++++ src/mistralai/client/mistral_agents.py | 2080 +++++++++++++ src/mistralai/client/mistral_jobs.py | 799 +++++ src/mistralai/client/models/__init__.py | 2531 ++++++++++++++++ src/mistralai/client/models/agent.py | 148 + .../client/models/agentaliasresponse.py | 23 + .../client/models/agentconversation.py | 95 + .../client/models/agentcreationrequest.py | 119 + .../client/models/agenthandoffdoneevent.py | 33 + .../client/models/agenthandoffentry.py | 82 + .../client/models/agenthandoffstartedevent.py | 33 + ..._api_v1_agents_create_or_update_aliasop.py | 26 + .../models/agents_api_v1_agents_deleteop.py | 16 + .../agents_api_v1_agents_get_versionop.py | 21 + .../models/agents_api_v1_agents_getop.py | 68 + ...ts_api_v1_agents_list_version_aliasesop.py | 16 + .../agents_api_v1_agents_list_versionsop.py | 33 + .../models/agents_api_v1_agents_listop.py | 104 + .../agents_api_v1_agents_update_versionop.py | 21 + .../models/agents_api_v1_agents_updateop.py | 23 + ...ts_api_v1_conversations_append_streamop.py | 28 + .../agents_api_v1_conversations_appendop.py | 28 + .../agents_api_v1_conversations_deleteop.py | 18 + .../agents_api_v1_conversations_getop.py | 35 + .../agents_api_v1_conversations_historyop.py | 18 + .../agents_api_v1_conversations_listop.py | 80 + .../agents_api_v1_conversations_messagesop.py | 18 + ...s_api_v1_conversations_restart_streamop.py | 28 + .../agents_api_v1_conversations_restartop.py | 28 + .../client/models/agentscompletionrequest.py | 198 ++ .../models/agentscompletionstreamrequest.py | 196 ++ .../client/models/agentupdaterequest.py | 133 + src/mistralai/client/models/apiendpoint.py | 22 + .../client/models/archiveftmodelout.py | 23 + .../client/models/assistantmessage.py | 77 + src/mistralai/client/models/audiochunk.py | 20 + src/mistralai/client/models/audioencoding.py | 18 + 
src/mistralai/client/models/audioformat.py | 17 + .../models/audiotranscriptionrequest.py | 113 + .../models/audiotranscriptionrequeststream.py | 111 + src/mistralai/client/models/basemodelcard.py | 116 + src/mistralai/client/models/batcherror.py | 17 + src/mistralai/client/models/batchjobin.py | 88 + src/mistralai/client/models/batchjobout.py | 129 + src/mistralai/client/models/batchjobsout.py | 24 + src/mistralai/client/models/batchjobstatus.py | 15 + src/mistralai/client/models/batchrequest.py | 54 + .../client/models/builtinconnectors.py | 13 + .../models/chatclassificationrequest.py | 20 + .../client/models/chatcompletionchoice.py | 33 + .../client/models/chatcompletionrequest.py | 221 ++ .../client/models/chatcompletionresponse.py | 31 + .../models/chatcompletionstreamrequest.py | 223 ++ .../client/models/chatmoderationrequest.py | 83 + src/mistralai/client/models/checkpointout.py | 26 + .../client/models/classificationrequest.py | 74 + .../client/models/classificationresponse.py | 24 + .../models/classificationtargetresult.py | 14 + .../client/models/classifierdetailedjobout.py | 164 + .../client/models/classifierftmodelout.py | 114 + .../client/models/classifierjobout.py | 173 ++ .../client/models/classifiertargetin.py | 61 + .../client/models/classifiertargetout.py | 24 + .../models/classifiertrainingparameters.py | 79 + .../models/classifiertrainingparametersin.py | 91 + .../client/models/codeinterpretertool.py | 17 + src/mistralai/client/models/completionargs.py | 107 + .../client/models/completionargsstop.py | 13 + .../client/models/completionchunk.py | 34 + .../client/models/completiondetailedjobout.py | 171 ++ .../client/models/completionevent.py | 14 + .../client/models/completionftmodelout.py | 110 + .../client/models/completionjobout.py | 184 ++ .../models/completionresponsestreamchoice.py | 63 + .../models/completiontrainingparameters.py | 84 + .../models/completiontrainingparametersin.py | 96 + src/mistralai/client/models/contentchunk.py | 42 + .../models/conversationappendrequest.py | 38 + .../models/conversationappendstreamrequest.py | 40 + .../client/models/conversationevents.py | 78 + .../client/models/conversationhistory.py | 59 + .../client/models/conversationinputs.py | 14 + .../client/models/conversationmessages.py | 28 + .../client/models/conversationrequest.py | 160 + .../client/models/conversationresponse.py | 52 + .../models/conversationrestartrequest.py | 113 + .../conversationrestartstreamrequest.py | 117 + .../models/conversationstreamrequest.py | 166 + .../client/models/conversationusageinfo.py | 69 + ...elete_model_v1_models_model_id_deleteop.py | 18 + src/mistralai/client/models/deletefileout.py | 25 + src/mistralai/client/models/deletemodelout.py | 26 + src/mistralai/client/models/deltamessage.py | 67 + .../client/models/documentlibrarytool.py | 22 + src/mistralai/client/models/documentout.py | 127 + .../client/models/documenttextcontent.py | 13 + .../client/models/documentupdatein.py | 71 + .../client/models/documenturlchunk.py | 62 + src/mistralai/client/models/embeddingdtype.py | 13 + .../client/models/embeddingrequest.py | 90 + .../client/models/embeddingresponse.py | 28 + .../client/models/embeddingresponsedata.py | 20 + src/mistralai/client/models/encodingformat.py | 10 + src/mistralai/client/models/entitytype.py | 16 + src/mistralai/client/models/eventout.py | 61 + src/mistralai/client/models/file.py | 33 + src/mistralai/client/models/filechunk.py | 23 + src/mistralai/client/models/filepurpose.py | 15 + 
.../models/files_api_routes_delete_fileop.py | 16 + .../files_api_routes_download_fileop.py | 16 + .../files_api_routes_get_signed_urlop.py | 25 + .../models/files_api_routes_list_filesop.py | 109 + .../files_api_routes_retrieve_fileop.py | 16 + .../models/files_api_routes_upload_fileop.py | 40 + src/mistralai/client/models/fileschema.py | 94 + src/mistralai/client/models/filesignedurl.py | 13 + .../client/models/fimcompletionrequest.py | 130 + .../client/models/fimcompletionresponse.py | 31 + .../models/fimcompletionstreamrequest.py | 128 + .../client/models/finetuneablemodeltype.py | 10 + .../client/models/ftclassifierlossfunction.py | 10 + .../client/models/ftmodelcapabilitiesout.py | 26 + src/mistralai/client/models/ftmodelcard.py | 132 + src/mistralai/client/models/function.py | 23 + src/mistralai/client/models/functioncall.py | 23 + .../client/models/functioncallentry.py | 83 + .../models/functioncallentryarguments.py | 15 + .../client/models/functioncallevent.py | 36 + src/mistralai/client/models/functionname.py | 17 + .../client/models/functionresultentry.py | 76 + src/mistralai/client/models/functiontool.py | 21 + .../client/models/githubrepositoryin.py | 69 + .../client/models/githubrepositoryout.py | 69 + .../client/models/httpvalidationerror.py | 28 + .../client/models/imagegenerationtool.py | 17 + src/mistralai/client/models/imageurl.py | 53 + src/mistralai/client/models/imageurlchunk.py | 33 + src/mistralai/client/models/inputentries.py | 37 + src/mistralai/client/models/inputs.py | 54 + .../client/models/instructrequest.py | 42 + src/mistralai/client/models/jobin.py | 147 + src/mistralai/client/models/jobmetadataout.py | 84 + ...obs_api_routes_batch_cancel_batch_jobop.py | 16 + .../jobs_api_routes_batch_get_batch_jobop.py | 59 + .../jobs_api_routes_batch_get_batch_jobsop.py | 108 + ..._fine_tuning_archive_fine_tuned_modelop.py | 18 + ...es_fine_tuning_cancel_fine_tuning_jobop.py | 45 + ...es_fine_tuning_create_fine_tuning_jobop.py | 38 + ...outes_fine_tuning_get_fine_tuning_jobop.py | 45 + ...utes_fine_tuning_get_fine_tuning_jobsop.py | 162 + ...tes_fine_tuning_start_fine_tuning_jobop.py | 43 + ...ine_tuning_unarchive_fine_tuned_modelop.py | 18 + ...s_fine_tuning_update_fine_tuned_modelop.py | 51 + src/mistralai/client/models/jobsout.py | 41 + src/mistralai/client/models/jsonschema.py | 61 + .../client/models/legacyjobmetadataout.py | 125 + .../client/models/libraries_delete_v1op.py | 16 + .../models/libraries_documents_delete_v1op.py | 21 + ...ents_get_extracted_text_signed_url_v1op.py | 21 + ...libraries_documents_get_signed_url_v1op.py | 21 + .../libraries_documents_get_status_v1op.py | 21 + ...braries_documents_get_text_content_v1op.py | 21 + .../models/libraries_documents_get_v1op.py | 21 + .../models/libraries_documents_list_v1op.py | 97 + .../libraries_documents_reprocess_v1op.py | 21 + .../models/libraries_documents_update_v1op.py | 28 + .../models/libraries_documents_upload_v1op.py | 56 + .../client/models/libraries_get_v1op.py | 16 + .../models/libraries_share_create_v1op.py | 22 + .../models/libraries_share_delete_v1op.py | 23 + .../models/libraries_share_list_v1op.py | 16 + .../client/models/libraries_update_v1op.py | 23 + src/mistralai/client/models/libraryin.py | 56 + .../client/models/libraryinupdate.py | 53 + src/mistralai/client/models/libraryout.py | 116 + .../client/models/listdocumentout.py | 19 + src/mistralai/client/models/listfilesout.py | 58 + src/mistralai/client/models/listlibraryout.py | 15 + src/mistralai/client/models/listsharingout.py | 15 + 
src/mistralai/client/models/messageentries.py | 18 + .../models/messageinputcontentchunks.py | 28 + .../client/models/messageinputentry.py | 111 + .../models/messageoutputcontentchunks.py | 37 + .../client/models/messageoutputentry.py | 109 + .../client/models/messageoutputevent.py | 101 + src/mistralai/client/models/metricout.py | 60 + src/mistralai/client/models/mistralerror.py | 30 + .../client/models/mistralpromptmode.py | 12 + .../client/models/modelcapabilities.py | 41 + .../client/models/modelconversation.py | 139 + src/mistralai/client/models/modellist.py | 34 + .../client/models/moderationobject.py | 21 + .../client/models/moderationresponse.py | 21 + .../client/models/no_response_error.py | 17 + src/mistralai/client/models/ocrimageobject.py | 89 + .../client/models/ocrpagedimensions.py | 25 + src/mistralai/client/models/ocrpageobject.py | 91 + src/mistralai/client/models/ocrrequest.py | 146 + src/mistralai/client/models/ocrresponse.py | 68 + src/mistralai/client/models/ocrtableobject.py | 34 + src/mistralai/client/models/ocrusageinfo.py | 57 + .../client/models/outputcontentchunks.py | 37 + src/mistralai/client/models/paginationinfo.py | 25 + src/mistralai/client/models/prediction.py | 29 + .../client/models/processingstatusout.py | 16 + .../models/realtimetranscriptionerror.py | 27 + .../realtimetranscriptionerrordetail.py | 29 + .../models/realtimetranscriptionsession.py | 20 + .../realtimetranscriptionsessioncreated.py | 30 + .../realtimetranscriptionsessionupdated.py | 30 + src/mistralai/client/models/referencechunk.py | 20 + src/mistralai/client/models/requestsource.py | 11 + .../client/models/responsedoneevent.py | 25 + .../client/models/responseerrorevent.py | 27 + src/mistralai/client/models/responseformat.py | 60 + .../client/models/responseformats.py | 11 + .../client/models/responsestartedevent.py | 24 + .../client/models/responsevalidationerror.py | 27 + ...retrieve_model_v1_models_model_id_getop.py | 38 + .../client/models/retrievefileout.py | 97 + src/mistralai/client/models/sampletype.py | 17 + src/mistralai/client/models/sdkerror.py | 40 + src/mistralai/client/models/security.py | 25 + src/mistralai/client/models/shareenum.py | 14 + src/mistralai/client/models/sharingdelete.py | 61 + src/mistralai/client/models/sharingin.py | 65 + src/mistralai/client/models/sharingout.py | 65 + src/mistralai/client/models/source.py | 15 + src/mistralai/client/models/ssetypes.py | 19 + src/mistralai/client/models/systemmessage.py | 35 + .../models/systemmessagecontentchunks.py | 21 + src/mistralai/client/models/textchunk.py | 20 + src/mistralai/client/models/thinkchunk.py | 35 + .../client/models/timestampgranularity.py | 10 + src/mistralai/client/models/tool.py | 19 + src/mistralai/client/models/toolcall.py | 25 + src/mistralai/client/models/toolchoice.py | 25 + src/mistralai/client/models/toolchoiceenum.py | 12 + .../client/models/toolexecutiondeltaevent.py | 44 + .../client/models/toolexecutiondoneevent.py | 44 + .../client/models/toolexecutionentry.py | 86 + .../models/toolexecutionstartedevent.py | 44 + src/mistralai/client/models/toolfilechunk.py | 75 + src/mistralai/client/models/toolmessage.py | 72 + .../client/models/toolreferencechunk.py | 80 + src/mistralai/client/models/tooltypes.py | 8 + src/mistralai/client/models/trainingfile.py | 17 + .../client/models/transcriptionresponse.py | 79 + .../models/transcriptionsegmentchunk.py | 86 + .../client/models/transcriptionstreamdone.py | 85 + .../models/transcriptionstreamevents.py | 58 + 
.../models/transcriptionstreameventtypes.py | 12 + .../models/transcriptionstreamlanguage.py | 35 + .../models/transcriptionstreamsegmentdelta.py | 83 + .../models/transcriptionstreamtextdelta.py | 35 + .../client/models/unarchiveftmodelout.py | 23 + .../client/models/updateftmodelin.py | 53 + src/mistralai/client/models/uploadfileout.py | 94 + src/mistralai/client/models/usageinfo.py | 82 + src/mistralai/client/models/usermessage.py | 60 + .../client/models/validationerror.py | 26 + .../client/models/wandbintegration.py | 72 + .../client/models/wandbintegrationout.py | 70 + .../client/models/websearchpremiumtool.py | 17 + src/mistralai/client/models/websearchtool.py | 17 + src/mistralai/client/models_.py | 1063 +++++++ src/mistralai/client/ocr.py | 303 ++ src/mistralai/client/py.typed | 1 + src/mistralai/client/sdk.py | 222 ++ src/mistralai/client/sdkconfiguration.py | 53 + src/mistralai/client/transcriptions.py | 481 +++ src/mistralai/client/types/__init__.py | 21 + src/mistralai/client/types/basemodel.py | 77 + src/mistralai/client/utils/__init__.py | 197 ++ src/mistralai/client/utils/annotations.py | 79 + src/mistralai/client/utils/datetimes.py | 23 + src/mistralai/client/utils/enums.py | 134 + src/mistralai/client/utils/eventstreaming.py | 248 ++ src/mistralai/client/utils/forms.py | 234 ++ src/mistralai/client/utils/headers.py | 136 + src/mistralai/client/utils/logger.py | 27 + src/mistralai/client/utils/metadata.py | 118 + src/mistralai/client/utils/queryparams.py | 217 ++ src/mistralai/client/utils/requestbodies.py | 66 + src/mistralai/client/utils/retries.py | 281 ++ src/mistralai/client/utils/security.py | 192 ++ src/mistralai/client/utils/serializers.py | 229 ++ .../client/utils/unmarshal_json_response.py | 38 + src/mistralai/client/utils/url.py | 155 + src/mistralai/client/utils/values.py | 137 + uv.lock | 2 +- 333 files changed, 37507 insertions(+), 1311 deletions(-) create mode 100644 src/mistralai/client/__init__.py create mode 100644 src/mistralai/client/_hooks/__init__.py create mode 100644 src/mistralai/client/_hooks/registration.py create mode 100644 src/mistralai/client/_hooks/sdkhooks.py create mode 100644 src/mistralai/client/_hooks/types.py create mode 100644 src/mistralai/client/_version.py create mode 100644 src/mistralai/client/accesses.py create mode 100644 src/mistralai/client/agents.py create mode 100644 src/mistralai/client/audio.py create mode 100644 src/mistralai/client/basesdk.py create mode 100644 src/mistralai/client/batch.py create mode 100644 src/mistralai/client/beta.py create mode 100644 src/mistralai/client/chat.py create mode 100644 src/mistralai/client/classifiers.py create mode 100644 src/mistralai/client/conversations.py create mode 100644 src/mistralai/client/documents.py create mode 100644 src/mistralai/client/embeddings.py create mode 100644 src/mistralai/client/files.py create mode 100644 src/mistralai/client/fim.py create mode 100644 src/mistralai/client/fine_tuning.py create mode 100644 src/mistralai/client/httpclient.py create mode 100644 src/mistralai/client/jobs.py create mode 100644 src/mistralai/client/libraries.py create mode 100644 src/mistralai/client/mistral_agents.py create mode 100644 src/mistralai/client/mistral_jobs.py create mode 100644 src/mistralai/client/models/__init__.py create mode 100644 src/mistralai/client/models/agent.py create mode 100644 src/mistralai/client/models/agentaliasresponse.py create mode 100644 src/mistralai/client/models/agentconversation.py create mode 100644 
src/mistralai/client/models/agentcreationrequest.py create mode 100644 src/mistralai/client/models/agenthandoffdoneevent.py create mode 100644 src/mistralai/client/models/agenthandoffentry.py create mode 100644 src/mistralai/client/models/agenthandoffstartedevent.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_deleteop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_get_versionop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_getop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_listop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_update_versionop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_updateop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_appendop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_deleteop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_getop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_historyop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_listop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_messagesop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_restartop.py create mode 100644 src/mistralai/client/models/agentscompletionrequest.py create mode 100644 src/mistralai/client/models/agentscompletionstreamrequest.py create mode 100644 src/mistralai/client/models/agentupdaterequest.py create mode 100644 src/mistralai/client/models/apiendpoint.py create mode 100644 src/mistralai/client/models/archiveftmodelout.py create mode 100644 src/mistralai/client/models/assistantmessage.py create mode 100644 src/mistralai/client/models/audiochunk.py create mode 100644 src/mistralai/client/models/audioencoding.py create mode 100644 src/mistralai/client/models/audioformat.py create mode 100644 src/mistralai/client/models/audiotranscriptionrequest.py create mode 100644 src/mistralai/client/models/audiotranscriptionrequeststream.py create mode 100644 src/mistralai/client/models/basemodelcard.py create mode 100644 src/mistralai/client/models/batcherror.py create mode 100644 src/mistralai/client/models/batchjobin.py create mode 100644 src/mistralai/client/models/batchjobout.py create mode 100644 src/mistralai/client/models/batchjobsout.py create mode 100644 src/mistralai/client/models/batchjobstatus.py create mode 100644 src/mistralai/client/models/batchrequest.py create mode 100644 src/mistralai/client/models/builtinconnectors.py create mode 100644 src/mistralai/client/models/chatclassificationrequest.py create mode 100644 src/mistralai/client/models/chatcompletionchoice.py create mode 100644 src/mistralai/client/models/chatcompletionrequest.py create mode 100644 src/mistralai/client/models/chatcompletionresponse.py create mode 100644 src/mistralai/client/models/chatcompletionstreamrequest.py create mode 100644 src/mistralai/client/models/chatmoderationrequest.py create mode 
100644 src/mistralai/client/models/checkpointout.py create mode 100644 src/mistralai/client/models/classificationrequest.py create mode 100644 src/mistralai/client/models/classificationresponse.py create mode 100644 src/mistralai/client/models/classificationtargetresult.py create mode 100644 src/mistralai/client/models/classifierdetailedjobout.py create mode 100644 src/mistralai/client/models/classifierftmodelout.py create mode 100644 src/mistralai/client/models/classifierjobout.py create mode 100644 src/mistralai/client/models/classifiertargetin.py create mode 100644 src/mistralai/client/models/classifiertargetout.py create mode 100644 src/mistralai/client/models/classifiertrainingparameters.py create mode 100644 src/mistralai/client/models/classifiertrainingparametersin.py create mode 100644 src/mistralai/client/models/codeinterpretertool.py create mode 100644 src/mistralai/client/models/completionargs.py create mode 100644 src/mistralai/client/models/completionargsstop.py create mode 100644 src/mistralai/client/models/completionchunk.py create mode 100644 src/mistralai/client/models/completiondetailedjobout.py create mode 100644 src/mistralai/client/models/completionevent.py create mode 100644 src/mistralai/client/models/completionftmodelout.py create mode 100644 src/mistralai/client/models/completionjobout.py create mode 100644 src/mistralai/client/models/completionresponsestreamchoice.py create mode 100644 src/mistralai/client/models/completiontrainingparameters.py create mode 100644 src/mistralai/client/models/completiontrainingparametersin.py create mode 100644 src/mistralai/client/models/contentchunk.py create mode 100644 src/mistralai/client/models/conversationappendrequest.py create mode 100644 src/mistralai/client/models/conversationappendstreamrequest.py create mode 100644 src/mistralai/client/models/conversationevents.py create mode 100644 src/mistralai/client/models/conversationhistory.py create mode 100644 src/mistralai/client/models/conversationinputs.py create mode 100644 src/mistralai/client/models/conversationmessages.py create mode 100644 src/mistralai/client/models/conversationrequest.py create mode 100644 src/mistralai/client/models/conversationresponse.py create mode 100644 src/mistralai/client/models/conversationrestartrequest.py create mode 100644 src/mistralai/client/models/conversationrestartstreamrequest.py create mode 100644 src/mistralai/client/models/conversationstreamrequest.py create mode 100644 src/mistralai/client/models/conversationusageinfo.py create mode 100644 src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py create mode 100644 src/mistralai/client/models/deletefileout.py create mode 100644 src/mistralai/client/models/deletemodelout.py create mode 100644 src/mistralai/client/models/deltamessage.py create mode 100644 src/mistralai/client/models/documentlibrarytool.py create mode 100644 src/mistralai/client/models/documentout.py create mode 100644 src/mistralai/client/models/documenttextcontent.py create mode 100644 src/mistralai/client/models/documentupdatein.py create mode 100644 src/mistralai/client/models/documenturlchunk.py create mode 100644 src/mistralai/client/models/embeddingdtype.py create mode 100644 src/mistralai/client/models/embeddingrequest.py create mode 100644 src/mistralai/client/models/embeddingresponse.py create mode 100644 src/mistralai/client/models/embeddingresponsedata.py create mode 100644 src/mistralai/client/models/encodingformat.py create mode 100644 src/mistralai/client/models/entitytype.py create mode 
100644 src/mistralai/client/models/eventout.py create mode 100644 src/mistralai/client/models/file.py create mode 100644 src/mistralai/client/models/filechunk.py create mode 100644 src/mistralai/client/models/filepurpose.py create mode 100644 src/mistralai/client/models/files_api_routes_delete_fileop.py create mode 100644 src/mistralai/client/models/files_api_routes_download_fileop.py create mode 100644 src/mistralai/client/models/files_api_routes_get_signed_urlop.py create mode 100644 src/mistralai/client/models/files_api_routes_list_filesop.py create mode 100644 src/mistralai/client/models/files_api_routes_retrieve_fileop.py create mode 100644 src/mistralai/client/models/files_api_routes_upload_fileop.py create mode 100644 src/mistralai/client/models/fileschema.py create mode 100644 src/mistralai/client/models/filesignedurl.py create mode 100644 src/mistralai/client/models/fimcompletionrequest.py create mode 100644 src/mistralai/client/models/fimcompletionresponse.py create mode 100644 src/mistralai/client/models/fimcompletionstreamrequest.py create mode 100644 src/mistralai/client/models/finetuneablemodeltype.py create mode 100644 src/mistralai/client/models/ftclassifierlossfunction.py create mode 100644 src/mistralai/client/models/ftmodelcapabilitiesout.py create mode 100644 src/mistralai/client/models/ftmodelcard.py create mode 100644 src/mistralai/client/models/function.py create mode 100644 src/mistralai/client/models/functioncall.py create mode 100644 src/mistralai/client/models/functioncallentry.py create mode 100644 src/mistralai/client/models/functioncallentryarguments.py create mode 100644 src/mistralai/client/models/functioncallevent.py create mode 100644 src/mistralai/client/models/functionname.py create mode 100644 src/mistralai/client/models/functionresultentry.py create mode 100644 src/mistralai/client/models/functiontool.py create mode 100644 src/mistralai/client/models/githubrepositoryin.py create mode 100644 src/mistralai/client/models/githubrepositoryout.py create mode 100644 src/mistralai/client/models/httpvalidationerror.py create mode 100644 src/mistralai/client/models/imagegenerationtool.py create mode 100644 src/mistralai/client/models/imageurl.py create mode 100644 src/mistralai/client/models/imageurlchunk.py create mode 100644 src/mistralai/client/models/inputentries.py create mode 100644 src/mistralai/client/models/inputs.py create mode 100644 src/mistralai/client/models/instructrequest.py create mode 100644 src/mistralai/client/models/jobin.py create mode 100644 src/mistralai/client/models/jobmetadataout.py create mode 100644 src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py create mode 100644 
src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py create mode 100644 src/mistralai/client/models/jobsout.py create mode 100644 src/mistralai/client/models/jsonschema.py create mode 100644 src/mistralai/client/models/legacyjobmetadataout.py create mode 100644 src/mistralai/client/models/libraries_delete_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_delete_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_status_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_text_content_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_list_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_reprocess_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_update_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_upload_v1op.py create mode 100644 src/mistralai/client/models/libraries_get_v1op.py create mode 100644 src/mistralai/client/models/libraries_share_create_v1op.py create mode 100644 src/mistralai/client/models/libraries_share_delete_v1op.py create mode 100644 src/mistralai/client/models/libraries_share_list_v1op.py create mode 100644 src/mistralai/client/models/libraries_update_v1op.py create mode 100644 src/mistralai/client/models/libraryin.py create mode 100644 src/mistralai/client/models/libraryinupdate.py create mode 100644 src/mistralai/client/models/libraryout.py create mode 100644 src/mistralai/client/models/listdocumentout.py create mode 100644 src/mistralai/client/models/listfilesout.py create mode 100644 src/mistralai/client/models/listlibraryout.py create mode 100644 src/mistralai/client/models/listsharingout.py create mode 100644 src/mistralai/client/models/messageentries.py create mode 100644 src/mistralai/client/models/messageinputcontentchunks.py create mode 100644 src/mistralai/client/models/messageinputentry.py create mode 100644 src/mistralai/client/models/messageoutputcontentchunks.py create mode 100644 src/mistralai/client/models/messageoutputentry.py create mode 100644 src/mistralai/client/models/messageoutputevent.py create mode 100644 src/mistralai/client/models/metricout.py create mode 100644 src/mistralai/client/models/mistralerror.py create mode 100644 src/mistralai/client/models/mistralpromptmode.py create mode 100644 src/mistralai/client/models/modelcapabilities.py create mode 100644 src/mistralai/client/models/modelconversation.py create mode 100644 src/mistralai/client/models/modellist.py create mode 100644 src/mistralai/client/models/moderationobject.py create mode 100644 src/mistralai/client/models/moderationresponse.py create mode 100644 src/mistralai/client/models/no_response_error.py create mode 100644 src/mistralai/client/models/ocrimageobject.py create mode 100644 src/mistralai/client/models/ocrpagedimensions.py create mode 100644 src/mistralai/client/models/ocrpageobject.py create mode 100644 src/mistralai/client/models/ocrrequest.py create mode 100644 src/mistralai/client/models/ocrresponse.py create mode 100644 src/mistralai/client/models/ocrtableobject.py create mode 100644 
src/mistralai/client/models/ocrusageinfo.py create mode 100644 src/mistralai/client/models/outputcontentchunks.py create mode 100644 src/mistralai/client/models/paginationinfo.py create mode 100644 src/mistralai/client/models/prediction.py create mode 100644 src/mistralai/client/models/processingstatusout.py create mode 100644 src/mistralai/client/models/realtimetranscriptionerror.py create mode 100644 src/mistralai/client/models/realtimetranscriptionerrordetail.py create mode 100644 src/mistralai/client/models/realtimetranscriptionsession.py create mode 100644 src/mistralai/client/models/realtimetranscriptionsessioncreated.py create mode 100644 src/mistralai/client/models/realtimetranscriptionsessionupdated.py create mode 100644 src/mistralai/client/models/referencechunk.py create mode 100644 src/mistralai/client/models/requestsource.py create mode 100644 src/mistralai/client/models/responsedoneevent.py create mode 100644 src/mistralai/client/models/responseerrorevent.py create mode 100644 src/mistralai/client/models/responseformat.py create mode 100644 src/mistralai/client/models/responseformats.py create mode 100644 src/mistralai/client/models/responsestartedevent.py create mode 100644 src/mistralai/client/models/responsevalidationerror.py create mode 100644 src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py create mode 100644 src/mistralai/client/models/retrievefileout.py create mode 100644 src/mistralai/client/models/sampletype.py create mode 100644 src/mistralai/client/models/sdkerror.py create mode 100644 src/mistralai/client/models/security.py create mode 100644 src/mistralai/client/models/shareenum.py create mode 100644 src/mistralai/client/models/sharingdelete.py create mode 100644 src/mistralai/client/models/sharingin.py create mode 100644 src/mistralai/client/models/sharingout.py create mode 100644 src/mistralai/client/models/source.py create mode 100644 src/mistralai/client/models/ssetypes.py create mode 100644 src/mistralai/client/models/systemmessage.py create mode 100644 src/mistralai/client/models/systemmessagecontentchunks.py create mode 100644 src/mistralai/client/models/textchunk.py create mode 100644 src/mistralai/client/models/thinkchunk.py create mode 100644 src/mistralai/client/models/timestampgranularity.py create mode 100644 src/mistralai/client/models/tool.py create mode 100644 src/mistralai/client/models/toolcall.py create mode 100644 src/mistralai/client/models/toolchoice.py create mode 100644 src/mistralai/client/models/toolchoiceenum.py create mode 100644 src/mistralai/client/models/toolexecutiondeltaevent.py create mode 100644 src/mistralai/client/models/toolexecutiondoneevent.py create mode 100644 src/mistralai/client/models/toolexecutionentry.py create mode 100644 src/mistralai/client/models/toolexecutionstartedevent.py create mode 100644 src/mistralai/client/models/toolfilechunk.py create mode 100644 src/mistralai/client/models/toolmessage.py create mode 100644 src/mistralai/client/models/toolreferencechunk.py create mode 100644 src/mistralai/client/models/tooltypes.py create mode 100644 src/mistralai/client/models/trainingfile.py create mode 100644 src/mistralai/client/models/transcriptionresponse.py create mode 100644 src/mistralai/client/models/transcriptionsegmentchunk.py create mode 100644 src/mistralai/client/models/transcriptionstreamdone.py create mode 100644 src/mistralai/client/models/transcriptionstreamevents.py create mode 100644 src/mistralai/client/models/transcriptionstreameventtypes.py create mode 100644 
src/mistralai/client/models/transcriptionstreamlanguage.py create mode 100644 src/mistralai/client/models/transcriptionstreamsegmentdelta.py create mode 100644 src/mistralai/client/models/transcriptionstreamtextdelta.py create mode 100644 src/mistralai/client/models/unarchiveftmodelout.py create mode 100644 src/mistralai/client/models/updateftmodelin.py create mode 100644 src/mistralai/client/models/uploadfileout.py create mode 100644 src/mistralai/client/models/usageinfo.py create mode 100644 src/mistralai/client/models/usermessage.py create mode 100644 src/mistralai/client/models/validationerror.py create mode 100644 src/mistralai/client/models/wandbintegration.py create mode 100644 src/mistralai/client/models/wandbintegrationout.py create mode 100644 src/mistralai/client/models/websearchpremiumtool.py create mode 100644 src/mistralai/client/models/websearchtool.py create mode 100644 src/mistralai/client/models_.py create mode 100644 src/mistralai/client/ocr.py create mode 100644 src/mistralai/client/py.typed create mode 100644 src/mistralai/client/sdk.py create mode 100644 src/mistralai/client/sdkconfiguration.py create mode 100644 src/mistralai/client/transcriptions.py create mode 100644 src/mistralai/client/types/__init__.py create mode 100644 src/mistralai/client/types/basemodel.py create mode 100644 src/mistralai/client/utils/__init__.py create mode 100644 src/mistralai/client/utils/annotations.py create mode 100644 src/mistralai/client/utils/datetimes.py create mode 100644 src/mistralai/client/utils/enums.py create mode 100644 src/mistralai/client/utils/eventstreaming.py create mode 100644 src/mistralai/client/utils/forms.py create mode 100644 src/mistralai/client/utils/headers.py create mode 100644 src/mistralai/client/utils/logger.py create mode 100644 src/mistralai/client/utils/metadata.py create mode 100644 src/mistralai/client/utils/queryparams.py create mode 100644 src/mistralai/client/utils/requestbodies.py create mode 100644 src/mistralai/client/utils/retries.py create mode 100644 src/mistralai/client/utils/security.py create mode 100644 src/mistralai/client/utils/serializers.py create mode 100644 src/mistralai/client/utils/unmarshal_json_response.py create mode 100644 src/mistralai/client/utils/url.py create mode 100644 src/mistralai/client/utils/values.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index f6c0f0a2..7aae1acb 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,19 +5,20 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 1.12.0 - configChecksum: 862d9a8667674972c091f9db84d42ba0 + releaseVersion: 2.0.0a1 + configChecksum: d5e0f55b62bca3e8aab33c7955415e61 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 00cab5ea-60fa-456d-ad3f-1ae32427d619 - pristine_commit_hash: b6e4b5c0cd6a42df18b2e7aa44ac696d48576d06 - pristine_tree_hash: b358b046bcef8a5f9b8898d98a4d9fbf82b52e6e + generation_id: edcb81a1-4bcb-439e-bfcb-f30eaac48c6a + pristine_commit_hash: b192b65dd75820612c5c672593ed322d420d2c73 + pristine_tree_hash: 869c5c810e502634a018e5792d4c2efe2686dbad features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 constsAndDefaults: 1.0.5 core: 5.23.18 customCodeRegions: 0.1.1 @@ -57,8 +58,8 @@ trackedFiles: pristine_git_object: 
8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae USAGE.md: id: 3aed33ce6e6f - last_write_checksum: sha1:4b34a680cd5a2b2acbadc41d0b309b3f30c1dfe5 - pristine_git_object: a31d502f33508216f686f4328cbbc8c14f8170ee + last_write_checksum: sha1:b1cf4cc87111df10c55731b3f5abad22890387a2 + pristine_git_object: 1810386448a440cfc5f7b8579695b228ae40460d docs/models/agent.md: id: ffdbb4c53c87 last_write_checksum: sha1:ec6c799658040b3c75d6ae0572bb391c6aea3fd4 @@ -1781,68 +1782,68 @@ trackedFiles: pristine_git_object: 57b6acbbd3b85aae5a9b7e2f754689637c01a912 docs/sdks/accesses/README.md: id: 2ea167c2eff2 - last_write_checksum: sha1:ac4ec473f9991ea2ca3e66838f8f791a54d881e3 - pristine_git_object: 040bc24c6acb9153296e105009ac4ef251cc2dd4 + last_write_checksum: sha1:22bd7a11d44295c2f433955604d3578292f26c99 + pristine_git_object: 64a1e749aeb6f2c32497a72a649ecc2b7549c077 docs/sdks/agents/README.md: id: 5965d8232fd8 - last_write_checksum: sha1:f368d2c40ad72aa9e8de04809bd300e935dbb63b - pristine_git_object: 173925eead663741af81d5f624c2964278bde979 + last_write_checksum: sha1:34e01f46c1a32020fa3eeb40fe80c3c0e8de0983 + pristine_git_object: 75efc492c4114417c22a796824ee971e9180104e docs/sdks/chat/README.md: id: 393193527c2c - last_write_checksum: sha1:931ab91704f496b220c7da1aa985cea14d969784 - pristine_git_object: 5bb24baa3444d72faace5473d0a775a0e5ad403e + last_write_checksum: sha1:7bc2201f585bea247c0bb148ecdea220bcb384e1 + pristine_git_object: 89c4fffbb777427723307b13c124668601ff5839 docs/sdks/classifiers/README.md: id: 74eb09b8d620 - last_write_checksum: sha1:d047af486fd4acd7f813232b20164eab11541c2d - pristine_git_object: e76efb79d8b1353208b42619f4cc5b688ef5d561 + last_write_checksum: sha1:f424721545e683e230ee0c612765be2bdb9897cd + pristine_git_object: 634ee419f3334ba50dd25f0e2340c32db1ec40b3 docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:06b7381c76c258e2a2dca3764456105929d98315 - pristine_git_object: ca383176a8b349cbaa757690b3f7a2cefe22cb1a + last_write_checksum: sha1:5ed03d60808cff2539e0e83df4714b3a274208a0 + pristine_git_object: acd43cdb63edd23665e808aaccc6ab3a4dc3dc85 docs/sdks/documents/README.md: id: 9758e88a0a9d - last_write_checksum: sha1:84791e86c3b9c15f8fd16d2a3df6bd3685023a69 - pristine_git_object: d3f5a9757c2327dab8e5b1962542b37c5e2551af + last_write_checksum: sha1:d9bcb4bf6c2189c282844f81b456fb29654e384c + pristine_git_object: d90e7ee7aab234cb992a904088cbbf2e57dd0baa docs/sdks/embeddings/README.md: id: 15b5b04486c1 - last_write_checksum: sha1:4da183aaf0df15d3a027077784903d93d8ea58e0 - pristine_git_object: 4390b7bd999a75a608f324f685b2284a8fa277ec + last_write_checksum: sha1:46e57c7808ce9c24dd54c3562379d2ff3e0526e8 + pristine_git_object: 0be7ea6dcace678d12d7e7e4f8e88daf7570df5d docs/sdks/files/README.md: id: e576d7a117f0 - last_write_checksum: sha1:99d15a4acce49d5eca853b5a08fd81e76581dc52 - pristine_git_object: 57b53fc75208f4f6361636690b91564148448633 + last_write_checksum: sha1:22298532be84a02d4fc8a524d6baa4fab0adcec4 + pristine_git_object: 44c39f8a3bd783b5c592e4f22c453bd76cef434a docs/sdks/fim/README.md: id: 499b227bf6ca - last_write_checksum: sha1:824f7d1b58ff0b650367737c0e9b91a9d2d14a45 - pristine_git_object: db6f2e1b65866e1309d94e852fa0a1e82d2606fd + last_write_checksum: sha1:34ff7167b0597bf668ef75ede016cb8884372d1b + pristine_git_object: 3c8c59c79db12c916577d6c064ddb16a511513fd docs/sdks/jobs/README.md: id: 7371cdc8b89a - last_write_checksum: sha1:5117aebda0558e7b82150f0b91480e3362687a89 - pristine_git_object: 666224a728cc433bca9520437d36a2b526ac2df6 + last_write_checksum: 
sha1:5dcd708cfcbb00d0ab9d41311c363c6fdae101b0 + pristine_git_object: 9c44be7559e2b7127d43ff50777fd32c7cf8b6ee docs/sdks/libraries/README.md: id: df9a982905a3 - last_write_checksum: sha1:8769d4b43f93c744fca43c34a7d7e9d99122c886 - pristine_git_object: e672c190ad6ac4623f99357d7e59d52f6722518f + last_write_checksum: sha1:0c710c0395906333b85bedd516cfca7dcb3b9b42 + pristine_git_object: bbdacf0538c6c055fef0c0109aac163e987a3dd5 docs/sdks/mistralagents/README.md: id: 20b3478ad16d - last_write_checksum: sha1:c4e73cd96136392d01b0ce2a57bf0854d05688c0 - pristine_git_object: bdd8d588d88f4929c3b33bcecd72bbb5fce7402d + last_write_checksum: sha1:b2dcb1516dd05dc38e0e0305969de248994aade4 + pristine_git_object: fe0f6e35a445e17ccedc2031c4b4204f5cc4d650 docs/sdks/mistraljobs/README.md: id: 71aafa44d228 - last_write_checksum: sha1:255a4221b3b61ef247b39c9723a78408cda486d3 - pristine_git_object: f1aa3f61973b1ee48777afb7fecc4bdf459882a0 + last_write_checksum: sha1:212bc82280a58f896172d173e5be516b926bc11c + pristine_git_object: 8f2358de28e88ffd1e3750292488c486f7bb893b docs/sdks/models/README.md: id: b35bdf4bc7ed - last_write_checksum: sha1:8e256360d014fc3384256a9f155c6382f8e16a6d - pristine_git_object: d51866b6cff74932bf86c266f75773c2d3e74fd0 + last_write_checksum: sha1:ca13e994ae31ddf37628eba9cc68cf8f64b48404 + pristine_git_object: 6fa28ca2e25c0b2f3fbf044b706d19f01193fc3c docs/sdks/ocr/README.md: id: 545e35d2613e - last_write_checksum: sha1:25846e2fe16ecb69d94c0d53edb74c22419c49aa - pristine_git_object: efcb99314c7d07a3dc556c297333046fc5d9e097 + last_write_checksum: sha1:a8d22a86b79a0166ecec26a3e9379fa110d49b73 + pristine_git_object: 9fd9d6fc14c5874dbb819239ea677a171a26969b docs/sdks/transcriptions/README.md: id: 089cf94ecf47 - last_write_checksum: sha1:01e68371b7a94cb35d6435efd3ef9247e8c27a94 - pristine_git_object: dabab00e85a3f480c8dc3dd7b792e68420ae08b6 + last_write_checksum: sha1:493070fcce7cec1a627b04daa31c38a6745659e7 + pristine_git_object: 9691b81d3a7eb27d7b2b489408d32513859646c9 py.typed: id: 258c3ed47ae4 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -1851,1248 +1852,1248 @@ trackedFiles: id: fe273b08f514 last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 - src/mistralai/__init__.py: - id: 7aaa1403a9fc + src/mistralai/client/__init__.py: + id: f1b791f9d2a5 last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c - src/mistralai/_hooks/__init__.py: - id: 89bd3648c8ca + src/mistralai/client/_hooks/__init__.py: + id: cef9ff97efd7 last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 - src/mistralai/_hooks/sdkhooks.py: - id: a085b78b3f45 - last_write_checksum: sha1:1d9666df503110a00569c2a79886ac3be49a3ffb - pristine_git_object: 1f9a9316c430821226ada4db2b37f87083f1c326 - src/mistralai/_hooks/types.py: - id: 066b285c9341 - last_write_checksum: sha1:16bf3c53068c38ba0f838172787178c883551283 - pristine_git_object: 6d0f3e1166cb0271f89f5ba83441c88199d7a432 - src/mistralai/_version.py: - id: 37b53ba66d7f - last_write_checksum: sha1:a4d76992b028e2d138e2f7f6d3087c2a606a21c7 - pristine_git_object: 6ee91593a9fbcd6c53eae810c1c2d0120f56262e - src/mistralai/accesses.py: - id: 98cb4addd052 - last_write_checksum: sha1:5d9d495274d67b1343ba99d755c1c01c64c2ead1 - pristine_git_object: be02ee5bafa1b10a52e79d1ad5481fa80908d99a - src/mistralai/agents.py: - id: aa07ea92bffb - 
last_write_checksum: sha1:2a760562daf1a01a66e5250658dffc5043e3c8ea - pristine_git_object: 73e4ee3c885f7c3472a9dc5c0546c02d4e19a1c4 - src/mistralai/audio.py: - id: c398f6a11e24 - last_write_checksum: sha1:aa75fa00e00d8059121d8de60844d70d50203661 - pristine_git_object: 3de29053f34654907c423ca6600f216f6b0dcbe0 - src/mistralai/basesdk.py: - id: 3127264590ce - last_write_checksum: sha1:5340f1c5976fd87d3b17b285535b63bbbe7db120 - pristine_git_object: c9a32aa13eae485d0159632dadbfbb2452978709 - src/mistralai/batch.py: - id: 60df0c5efce3 - last_write_checksum: sha1:9d463fd6ac747635ab2b0e61c918a098aae5a370 - pristine_git_object: 7ed7ccefdaab2368dc7bb9fa8c718a05dcec3ca6 - src/mistralai/beta.py: - id: 7d1c8d453249 - last_write_checksum: sha1:780b45086f215d1f04983d1ea6c89acc16475cfc - pristine_git_object: 4bbf1fa36053c6754026285f3a149911b653d735 - src/mistralai/chat.py: - id: cb76f81a1426 - last_write_checksum: sha1:cf0a3b1b2d1163cb96c0c57d4cf0bede556c02b1 - pristine_git_object: 1528c4c93fc8b5f5d02976db836a1cefda4d1e57 - src/mistralai/classifiers.py: - id: a8f7d4c1c787 - last_write_checksum: sha1:6eabb0ba04fdf77d4bb9b45399c6f2ce55fe8317 - pristine_git_object: 7c32506ec03cc0fd88b786ff49d7690fd4283d2a - src/mistralai/conversations.py: - id: be58e57a6198 - last_write_checksum: sha1:b9287bbe777a042b8258494cd5162d32e6a89c20 - pristine_git_object: 194cb4c0a629654b31bbcce8391baf48601d0eb7 - src/mistralai/documents.py: - id: 1945602083a8 - last_write_checksum: sha1:14d1e6b5a95869d70a6fc89b07d5365c98aff5d7 - pristine_git_object: fac58fdb2e76668911fc6c59918b1b444aed0bd5 - src/mistralai/embeddings.py: - id: 2bbb9b5427d7 - last_write_checksum: sha1:842f784ab976936902be23331b672bdba8c88bc9 - pristine_git_object: 7430f8042df4fec517288d0ddb0eb174e7e43a8e - src/mistralai/files.py: - id: 0e29db0e2269 - last_write_checksum: sha1:d79d5b1785f441a46673a7efa108ddb98c44376a - pristine_git_object: 90ada0ff707521d59d329bebac74005eb68488d8 - src/mistralai/fim.py: - id: 71a865142baf - last_write_checksum: sha1:7accf79c11a17fefbacde7f2b0f966f3716233df - pristine_git_object: 53109c70f0ad9844a4c445a5ed674f675b24d274 - src/mistralai/fine_tuning.py: - id: 12578f7d13a6 - last_write_checksum: sha1:e48227f7ea5b51d837e7619f59582e663eb94ed1 - pristine_git_object: 8ed5788a58ab2e9d1125b30624c734a602084294 - src/mistralai/httpclient.py: - id: dcfb0dd6b386 + src/mistralai/client/_hooks/sdkhooks.py: + id: ed1e485b2153 + last_write_checksum: sha1:5688b56bf910f5f176bcacc58f4ad440ac2fa169 + pristine_git_object: c9318db481df2293b37e9b964da417ee5de86911 + src/mistralai/client/_hooks/types.py: + id: 85cfedfb7582 + last_write_checksum: sha1:ea20450ab595abb6ad744ecbd58927e8fa1ce520 + pristine_git_object: e7e1bb7f61527de6095357e4f2ab11e342a4af87 + src/mistralai/client/_version.py: + id: cc807b30de19 + last_write_checksum: sha1:e654adbd2f066332b48c68d97e995dcc8f7dde84 + pristine_git_object: 8c5d6e54860c69881bf976887910fc32d183c6e5 + src/mistralai/client/accesses.py: + id: 76fc53bfcf59 + last_write_checksum: sha1:da6c930bfec52d4cc344408f0aaef2874705fa68 + pristine_git_object: 307c7156626e735c802c149ea3547648ea03da09 + src/mistralai/client/agents.py: + id: e946546e3eaa + last_write_checksum: sha1:4a2bc22e5a6d9aee56d04d2800084eb326ef9ba7 + pristine_git_object: c04abd21b5b7cb9b8ddfdb52ec67fffa7d21759a + src/mistralai/client/audio.py: + id: 7a8ed2e90d61 + last_write_checksum: sha1:9ecd271eedf02703b45e6bc4280df10ed2edbbc8 + pristine_git_object: 28ccda1b533b4cef31844bddae2289268b459a24 + src/mistralai/client/basesdk.py: + id: 7518c67b81ea + last_write_checksum: 
+    pristine_git_object: bddc9012f28f7881b75a720a07a3ad60845e472e
+  src/mistralai/client/batch.py:
+    id: cffe114c7ac7
+    last_write_checksum: sha1:b7236249d2a6235fc3834b2c3bba3feda838013e
+    pristine_git_object: d53a45fbcbeb7b1d8fb29c373101c9e2a586b877
+  src/mistralai/client/beta.py:
+    id: 981417f45147
+    last_write_checksum: sha1:2cf61e620e0e0e969e951d100e42c8c9b8facd27
+    pristine_git_object: b30003eae52be5e79838fe994cda8474068a43dc
+  src/mistralai/client/chat.py:
+    id: 7eba0f088d47
+    last_write_checksum: sha1:46321214352946f2077a0f60c4c903c354a42da1
+    pristine_git_object: 9c50bce81c264c70256b2ff8716e88216a78535f
+  src/mistralai/client/classifiers.py:
+    id: 26e773725732
+    last_write_checksum: sha1:b3bed5a404f8837cc12e516f3fb85f47fd37518a
+    pristine_git_object: 537e2438afcb570a3e436ab4dd8b7d604b35b627
+  src/mistralai/client/conversations.py:
+    id: 40692a878064
+    last_write_checksum: sha1:fc75dc4099891c8cbfbcc72284bf8e7dbbb834a5
+    pristine_git_object: 9caf42214daf262b15bac5b36467700ee17cd7d1
+  src/mistralai/client/documents.py:
+    id: bcc17286c31c
+    last_write_checksum: sha1:82287ef513f2f5ee1acb9ffe8323f2dad0fc86f4
+    pristine_git_object: 009a604f1c2fa367d14df7fb9f4078083c4be501
+  src/mistralai/client/embeddings.py:
+    id: f9c17258207e
+    last_write_checksum: sha1:a3fa049388bf794ed764a1a8b6736f6c29136c83
+    pristine_git_object: 359f2f621d1628536b89f66115726064db34e51b
+  src/mistralai/client/files.py:
+    id: f12df4b2ce43
+    last_write_checksum: sha1:72c1fda19adff9042461f498d5859bae62d4603a
+    pristine_git_object: 97817eab1a4b0a0649a128b06a9f3ff4077dffa5
+  src/mistralai/client/fim.py:
+    id: 217bea5d701d
+    last_write_checksum: sha1:d62f3bee1322a41aefc0cc01aa8313e8b7e3ae1b
+    pristine_git_object: 4a834fe93a9b9a8af30f681c9541a7cef0a513e1
+  src/mistralai/client/fine_tuning.py:
+    id: 5d5079bbd54e
+    last_write_checksum: sha1:e8061f6bb9912d668249c3c20235e9778345d23b
+    pristine_git_object: c57425fdf3225eaeccee47a17db198a3974995a3
+  src/mistralai/client/httpclient.py:
+    id: 3e46bde74327
     last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7
     pristine_git_object: 89560b566073785535643e694c112bedbd3db13d
-  src/mistralai/jobs.py:
-    id: 6869267a98bf
-    last_write_checksum: sha1:e771ca001a64cc3be33964e95393495a16ab3d8c
-    pristine_git_object: df8ae4d3489f2791586ac6399bfe6039522f09b4
-  src/mistralai/libraries.py:
-    id: e5b244f28b27
-    last_write_checksum: sha1:7084d7b61238494f834fe20dcf387810e77f3eb0
-    pristine_git_object: 32648937feb79adf6155423cbe9bac4d7fe52224
-  src/mistralai/mistral_agents.py:
-    id: 671c4985aaa1
-    last_write_checksum: sha1:1fe4fb4f2828b532ac3ddf3b72e748a53d5099e9
-    pristine_git_object: 7fb0ce259cb1c1a3847c567bdc992c176489add6
-  src/mistralai/mistral_jobs.py:
-    id: 18065a449da0
-    last_write_checksum: sha1:fb205d962444f6aba163ecd3169c12489b3f0cc9
-    pristine_git_object: d1aeec8a014b22e44f4fe5e751206c3648e875af
-  src/mistralai/models/__init__.py:
-    id: 3228134f03e5
+  src/mistralai/client/jobs.py:
+    id: 22e6e695e52b
+    last_write_checksum: sha1:a040fec9c1a50ec603e2cd22284db526c177a55b
+    pristine_git_object: 848926eaca286f74b5cfd4b0f0f72a8e2222c52f
+  src/mistralai/client/libraries.py:
+    id: d43a5f78045f
+    last_write_checksum: sha1:5264a24b973f49b4ea7252868f4a76baba9093b4
+    pristine_git_object: 03a547410e042c19329ea9a91eef1bf25ecdcbe1
+  src/mistralai/client/mistral_agents.py:
+    id: bd22ff89d9bb
+    last_write_checksum: sha1:7b6d1ac9256c1f958bbc9cf18355b4407f0cffc4
+    pristine_git_object: 2ac7a29e4d7ab72c5fa29d13e7a8e4648906ead0
+  src/mistralai/client/mistral_jobs.py:
+    id: e925bb9b27ce
+    last_write_checksum: sha1:b1d8ecfe998d64637089eb4a5a4cfdf4735717d1
+    pristine_git_object: eae4403326ecfdf432a1ca7feb260ffe8ec251cf
+  src/mistralai/client/models/__init__.py:
+    id: e0e8dad92725
     last_write_checksum: sha1:cb1fb02e33b85bf82db7d6fd15b2cc3b109c5060
     pristine_git_object: 23e652220f29a882748661a8c0d21aa2830471bf
-  src/mistralai/models/agent.py:
-    id: ca4162a131b1
-    last_write_checksum: sha1:fe8a7c8c9c4ba59613d7d89f0c2e7a6958e25f85
-    pristine_git_object: eb30905b3de2b69ece35bdd40f390b2fa6ffc5a8
-  src/mistralai/models/agentaliasresponse.py:
-    id: d329dd68429e
-    last_write_checksum: sha1:a3ebf39f159f7cd63dbabd9ff2c79df97e43e41f
-    pristine_git_object: c0928da9c65c588c515f3f1668ccfb69d3a23861
-  src/mistralai/models/agentconversation.py:
-    id: bd3035451c40
-    last_write_checksum: sha1:724a256f4914116500fd962df4b3cfc79ea75c43
-    pristine_git_object: 6007b5715fd4a463d25a244b716effafbeecace6
-  src/mistralai/models/agentcreationrequest.py:
-    id: 87f33bd9ea58
-    last_write_checksum: sha1:a6885376d36a5a17273d8d8d8d45e3d6c3ee1b9f
-    pristine_git_object: 6a14201eca82f26871ab4f87e547a5e9bcf3b933
-  src/mistralai/models/agenthandoffdoneevent.py:
-    id: 496685a9343b
-    last_write_checksum: sha1:f03d37569960b56155e977aa68fbbaad8e25f687
-    pristine_git_object: 1cdbf45652ff70d045c650734ab6bdc0eca97734
-  src/mistralai/models/agenthandoffentry.py:
-    id: 836045caeb8f
-    last_write_checksum: sha1:e5c6b73014cd6859a47cb5958cdfa7b105e3aa3e
-    pristine_git_object: 66136256215caf7c1f174deec70ab9fbfff634fc
-  src/mistralai/models/agenthandoffstartedevent.py:
-    id: ce8e306fa522
-    last_write_checksum: sha1:2b5bac2f628c0e7cdd6df73404f69f5d405e576c
-    pristine_git_object: 11bfa918903f8de96f98f722eaaf9a70b4fca8c1
-  src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py:
-    id: dd0e03fda847
-    last_write_checksum: sha1:a0dd39bb4b0af3a15b1aa8427a6f07d1826c04dc
-    pristine_git_object: 6cf9d0e0644ce0afd5f673f18fdda9dcccb5f04c
-  src/mistralai/models/agents_api_v1_agents_deleteop.py:
-    id: 588791d168a1
-    last_write_checksum: sha1:2dae37c3b9778d688663550b9803d52111577f3e
-    pristine_git_object: 38e04953cc320f503a2f6e77096985da60896f2a
-  src/mistralai/models/agents_api_v1_agents_get_versionop.py:
-    id: bdb81ef0e35a
-    last_write_checksum: sha1:372da3794afd45d442d56edd3ec3cc4907f88223
-    pristine_git_object: fddb10dde6707b6641b035e372270991d349f4f3
-  src/mistralai/models/agents_api_v1_agents_getop.py:
-    id: 2358eceee519
-    last_write_checksum: sha1:dca59474f75a6636ecac8265cab1bb51d36df56a
-    pristine_git_object: 2b7d89a5b34f3e768a18f9edbdf712fbcf5c20e4
-  src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py:
-    id: 51215b825530
-    last_write_checksum: sha1:d24f8eff3bd19414c0a04e474b33e1c63861a1da
-    pristine_git_object: 650a7187a3ac419069440fe040a166a036835b37
-  src/mistralai/models/agents_api_v1_agents_list_versionsop.py:
-    id: 5f680df288a9
-    last_write_checksum: sha1:a236170f366d9701346b57f9ee4c788a9a2293e5
-    pristine_git_object: cf988b3d3b5130ff49f7ec0accb30a8e9dbfe4e1
-  src/mistralai/models/agents_api_v1_agents_listop.py:
-    id: 15579851e4fe
-    last_write_checksum: sha1:1268af12d397f86e0486c42ec8115103e29ee137
-    pristine_git_object: 88b5bad107d28943de8f25cb26c6597da2eba31d
-  src/mistralai/models/agents_api_v1_agents_update_versionop.py:
-    id: 262e7a2f05e3
-    last_write_checksum: sha1:faa5550d08ddbb8223e8e6f2fcea6f09408bd228
-    pristine_git_object: 5e4b97b3b175a8485fd04adc5b92a4870a46bda9
-  src/mistralai/models/agents_api_v1_agents_updateop.py:
-    id: 72f9d6466691
-    last_write_checksum: sha1:9c99959045d9d182a9814954dcd769b294267165
-    pristine_git_object: 32696fbe60f17067520bf574bac8144abeb7af3f
-  src/mistralai/models/agents_api_v1_conversations_append_streamop.py:
-    id: 89a020d8fdfd
-    last_write_checksum: sha1:ec2fbbc5017a2374ab3f75a33592399b83fcc5f6
-    pristine_git_object: d2489ffb2e01dc6a4e93aee931723be55261ca6c
-  src/mistralai/models/agents_api_v1_conversations_appendop.py:
-    id: fd73b0582d26
-    last_write_checksum: sha1:22f62e8277ae5845e2b3c41d81d962edc3592090
-    pristine_git_object: ba37697ea506fe08ecee5ed7585a1deee56a0827
-  src/mistralai/models/agents_api_v1_conversations_deleteop.py:
-    id: ecd0a5c14be5
-    last_write_checksum: sha1:bd894dcef52e02541fa09ae0d51755dad946e3c2
-    pristine_git_object: 94126cae1a7a4cd09037d8224cd79f63935a2636
-  src/mistralai/models/agents_api_v1_conversations_getop.py:
-    id: 600a28e887fe
-    last_write_checksum: sha1:b2dbccf934677ed646bb9ad6e947787bb6c4235b
-    pristine_git_object: a37a61babd146035d51095143f8781c0d94be0c3
-  src/mistralai/models/agents_api_v1_conversations_historyop.py:
-    id: 5e3db049c234
-    last_write_checksum: sha1:fde97f139a93c4723abc4f08ebcf20afcdf67d54
-    pristine_git_object: b8c33d1b1b18b0a0c6b263962efc1d84d066021a
-  src/mistralai/models/agents_api_v1_conversations_listop.py:
-    id: 3cf4a3751a1c
-    last_write_checksum: sha1:ac8ae982fc23123b8b3ce3c1ba58980a1c6e2119
-    pristine_git_object: d314f83853dbef74fa2e5ce2b5a800843110cc14
-  src/mistralai/models/agents_api_v1_conversations_messagesop.py:
-    id: c7eb683e873e
-    last_write_checksum: sha1:d96c4e78c4ce75b668bc23aec91be399a0d26541
-    pristine_git_object: f0dac8bf6a58882b55c88b12e039357c5ff7dfe4
-  src/mistralai/models/agents_api_v1_conversations_restart_streamop.py:
-    id: c9d4d80d68d5
-    last_write_checksum: sha1:8a96d0ccbe2918a13e022f629ea62120e9ed5c0d
-    pristine_git_object: f39b74eb6358938de7fddf7d1fd92eb4fb011f6b
-  src/mistralai/models/agents_api_v1_conversations_restartop.py:
-    id: 9dadcde20152
-    last_write_checksum: sha1:44a127399dfcbc7c07af3c686469bcbb6e798b40
-    pristine_git_object: f706c066d1de93cf03c9a7829fc3ea79eddfc8ad
-  src/mistralai/models/agentscompletionrequest.py:
-    id: 843813a24928
-    last_write_checksum: sha1:f84d77c55787a07c5a8f7cb25d13dc02762e5c80
-    pristine_git_object: cc07a6bdd38e221e66ca4162ef74354ef1c9f5e2
-  src/mistralai/models/agentscompletionstreamrequest.py:
-    id: 6be8367d3443
-    last_write_checksum: sha1:7bc5fd554e4adf8d8eb0a8f81aae32266b174932
-    pristine_git_object: d6a887be8f33db56ae0eec47b5300a3a29736067
-  src/mistralai/models/agentupdaterequest.py:
-    id: 24e7a9fdb507
-    last_write_checksum: sha1:a5bb4a17ff80a3471321d38faa1e6605ebe541a4
-    pristine_git_object: e496907c084f0a6cf90de6ebbf508d3137699bf0
-  src/mistralai/models/apiendpoint.py:
-    id: b26effd643dc
-    last_write_checksum: sha1:07ba583784d9099e6a24e94805a405112e2fcb41
-    pristine_git_object: 0ad9366f0efbcf989f63fa66750dce2ecc5bb56a
-  src/mistralai/models/archiveftmodelout.py:
-    id: 48fc1069be95
-    last_write_checksum: sha1:c3c6b5ae470f23805201cd5565fca095bc9b7a74
-    pristine_git_object: 0f753cfc948282f4ee5004fe463c091ed99e83a7
-  src/mistralai/models/assistantmessage.py:
-    id: e73f1d43e4ad
-    last_write_checksum: sha1:b5d1d0a77b9a4e2f7272ff9fe7e319c2bc1bdb25
-    pristine_git_object: a38a10c4968634d64f4bdb58d74f4955b29a92a8
-  src/mistralai/models/audiochunk.py:
-    id: ad7cf79b2cca
-    last_write_checksum: sha1:c13008582708d368c3dee398cc4226f747b5a9d0
-    pristine_git_object: 64fc43ff4c4ebb99b7a6c7aa3090b13ba4a2bdbc
-  src/mistralai/models/audioencoding.py:
-    id: f4713d60f468
-    last_write_checksum: sha1:ffd1fd54680ea0bab343bdb22145b9eabc25c68d
-    pristine_git_object: 13eb6d1567f768da3753a73ddba9fa5e3ebfa7b3
-  src/mistralai/models/audioformat.py:
-    id: 3572f5e8c65b
-    last_write_checksum: sha1:7259b46ebe4044633c0251eea5b3c88dedcc76a6
-    pristine_git_object: 48ab648c3525fcc9fe1c722b7beee0f649e30e7a
-  src/mistralai/models/audiotranscriptionrequest.py:
-    id: 4c6a6fee484a
-    last_write_checksum: sha1:8dd41335ffd46dd1099bdb20baac32d043c5936c
-    pristine_git_object: 86417b4235292de3ab1d2b46116ce0ba94010087
-  src/mistralai/models/audiotranscriptionrequeststream.py:
-    id: 863eca721e72
-    last_write_checksum: sha1:010618236f3da1c99d63d334266622cf84e6b09f
-    pristine_git_object: 1f4087e8d33c8a3560d5ce58f2a1a7bc4627556b
-  src/mistralai/models/basemodelcard.py:
-    id: 5554644ee6f2
-    last_write_checksum: sha1:aa5af32cda04d45bcdf2c8fb380529c4fbc828aa
-    pristine_git_object: 706841b7fc71051890201445050b5383c4b0e998
-  src/mistralai/models/batcherror.py:
-    id: 657a766ed6c7
-    last_write_checksum: sha1:5d727f59bbc23e36747af5e95ce20fcbf4ab3f7c
-    pristine_git_object: 4f8234465c57779d026fe65e131ba4cbe2746d40
-  src/mistralai/models/batchjobin.py:
-    id: 7229d3fdd93b
-    last_write_checksum: sha1:074e8efd2474a1bf0949a7abcb90d3504a742f94
-    pristine_git_object: 839a9b3cadb96986537422bc2a49532fcf9c2029
-  src/mistralai/models/batchjobout.py:
-    id: 420d2a600dfe
-    last_write_checksum: sha1:486ecb38d44e9e3f8509504e30fe902f6869da1b
-    pristine_git_object: 904cd3496134ca38b8e53772f7b30e812bb92e65
-  src/mistralai/models/batchjobsout.py:
-    id: 7bd4a7b41c82
-    last_write_checksum: sha1:838e36e981a3dedb54663a32d8657d2a6ffaa364
-    pristine_git_object: a1eba5db0ab8d8308b9e933352b55e32b80f33c7
-  src/mistralai/models/batchjobstatus.py:
-    id: ee3393d6b301
+  src/mistralai/client/models/agent.py:
+    id: 1336849c84fb
+    last_write_checksum: sha1:68609569847b9d638d948deba9563d5460c17b9f
+    pristine_git_object: 3bedb3a3a71c116f5ccb0294bc9f3ce6690e47b2
+  src/mistralai/client/models/agentaliasresponse.py:
+    id: 3899a98a55dd
+    last_write_checksum: sha1:6dfa55d4b61a543382fab8e3a6e6d824feb5cfc7
+    pristine_git_object: 4bc8225c0217f741328d52ef7df38f7a9c77af21
+  src/mistralai/client/models/agentconversation.py:
+    id: 1b7d73eddf51
+    last_write_checksum: sha1:2624deece37e8819cb0f60bbacbbf1922aa2c99c
+    pristine_git_object: 5dfa8c3137c59be90c655ba8cf8afb8a3966c93a
+  src/mistralai/client/models/agentcreationrequest.py:
+    id: 35b7f4933b3e
+    last_write_checksum: sha1:60caa3dfa2425ac3ff4e64d81ac9d18df0774157
+    pristine_git_object: 61a5aff554f830ab9057ce9ceafc2ce78380290f
+  src/mistralai/client/models/agenthandoffdoneevent.py:
+    id: 82628bb5fcea
+    last_write_checksum: sha1:79de1153a3fce681ee547cc1d3bd0fd8fc5598d2
+    pristine_git_object: c826aa5e1f2324cddb740b3ffc05095ff26c666d
+  src/mistralai/client/models/agenthandoffentry.py:
+    id: 5030bcaa3a07
+    last_write_checksum: sha1:86622620c14e2aacbdcc47b9772a3b9bb4127018
+    pristine_git_object: 0b0de13f8840e9ab221ea233040ca03241cba8b7
+  src/mistralai/client/models/agenthandoffstartedevent.py:
+    id: 2f6093d9b222
+    last_write_checksum: sha1:772bc7b396285560cdafd7d7fb4bc4ece79179ad
+    pristine_git_object: 4b8ff1e5e3639fb94b55c0a417e9478d5a4252b2
+  src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py:
+    id: 23a832f8f175
+    last_write_checksum: sha1:9ca9a0be2db68005fc0dec3f24d24fccf8d0c631
+    pristine_git_object: 33da325cadf36ce8162bac11f1576872bcbbdbd6
+  src/mistralai/client/models/agents_api_v1_agents_deleteop.py:
+    id: 95adb6768908
+    last_write_checksum: sha1:9118fb084668440cec39ddd47b613fb4cd796c8d
+    pristine_git_object: 58fe902f0a51b50db869dfa760f1a3a4cba36342
+  src/mistralai/client/models/agents_api_v1_agents_get_versionop.py:
+    id: ef9914284afb
+    last_write_checksum: sha1:d9b429cd8ea7d20050c0bc2077eec0084ed916b6
+    pristine_git_object: edcccda19d5c3e784a227c6356285ee48be3d7f2
+  src/mistralai/client/models/agents_api_v1_agents_getop.py:
+    id: f5918c34f1c7
+    last_write_checksum: sha1:412df95a1ac4b4f6a59e4391fd1226f2e26e4537
+    pristine_git_object: d4817457a33d49ddaa09e8d41f3b03b69e8e491e
+  src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py:
+    id: a04815e6c798
+    last_write_checksum: sha1:7bd6ba32e2aeeee4c34f02bab1d460eb384f9229
+    pristine_git_object: b9770fffe5be41579f12d76f41a049e8b41b3ef8
+  src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py:
+    id: 19e3310c3907
+    last_write_checksum: sha1:62b3b94ad3ed412f74cfc75572a91b7f3cd6b39b
+    pristine_git_object: 813335f9e972c976f0e887d1f26be3c224b36b0c
+  src/mistralai/client/models/agents_api_v1_agents_listop.py:
+    id: 25a6460a6e19
+    last_write_checksum: sha1:586ad2257e4a2c70bdb6d0044afe7d1b20f23d93
+    pristine_git_object: 119f51236dda0769ab3dc41a9dbbb11b5d5e935d
+  src/mistralai/client/models/agents_api_v1_agents_update_versionop.py:
+    id: 63f61b8891bf
+    last_write_checksum: sha1:b214f6850347e4c98930ef6f019fdad52668c8c0
+    pristine_git_object: 116f952b2ba2a7dca47975a339267c85122cd29a
+  src/mistralai/client/models/agents_api_v1_agents_updateop.py:
+    id: bb55993c932d
+    last_write_checksum: sha1:28cd6d0b729745b2e16d91a5e005d59a6d3be124
+    pristine_git_object: 116acaa741f79123e682db0be2adbb98cf8283d8
+  src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py:
+    id: ec00e0905f15
+    last_write_checksum: sha1:67967a775c3a1ec139ccd6991465ea15327e3ba7
+    pristine_git_object: 9f00ffd4b484f03dae6e670d019f61a4392afc85
+  src/mistralai/client/models/agents_api_v1_conversations_appendop.py:
+    id: 39c6125e850c
+    last_write_checksum: sha1:93621c5ea8fbc5c038c92596b7d4c0aef0a01e2f
+    pristine_git_object: 13d07ba91207f82dcea8f58c238cc743cd6c3964
+  src/mistralai/client/models/agents_api_v1_conversations_deleteop.py:
+    id: 0792e6abbdcb
+    last_write_checksum: sha1:dc60f272fed790bec27c654da0fb185aab27ff82
+    pristine_git_object: 81066f90302d79bc2083d1e31aa13656c27cc65f
+  src/mistralai/client/models/agents_api_v1_conversations_getop.py:
+    id: c530f2fc64d0
+    last_write_checksum: sha1:28cab443af4d623a22e836ab876da20d84eb8a41
+    pristine_git_object: c919f99e38148fb9b2d51816d0dd231ee828b11d
+  src/mistralai/client/models/agents_api_v1_conversations_historyop.py:
+    id: 2f5ca33768aa
+    last_write_checksum: sha1:9f33f183cd07b823b4727662ea305c74853049c5
+    pristine_git_object: ba1f8890c1083947e4d6882dff2b50b3987be738
+  src/mistralai/client/models/agents_api_v1_conversations_listop.py:
+    id: 936e36181d36
+    last_write_checksum: sha1:b338f793707c25ce9703266d8b7f6f560051b057
+    pristine_git_object: bb3c7127c4b43019405689dc2ae10f5933c763bc
+  src/mistralai/client/models/agents_api_v1_conversations_messagesop.py:
+    id: b5141764a708
+    last_write_checksum: sha1:0be49e2ad8a3edb079ce4b1f092654c7a6b7e309
+    pristine_git_object: e05728f2c2c0a350bdaf72fe9dc488c923230ab7
+  src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py:
+    id: c284a1711148
+    last_write_checksum: sha1:ef22ebf2e217ab41ce0b69cf388122ee18ad7b05
+    pristine_git_object: 9b489ab46486cc37349d64a4fc685f1355afb79a
+  src/mistralai/client/models/agents_api_v1_conversations_restartop.py:
+    id: 3ba234e5a8fc
+    last_write_checksum: sha1:e7e22098d8b31f5cc5cb0e8fafebe515842c2f88
+    pristine_git_object: 8bce3ce519a69a6d1cb36383b22fb801768c4868
+  src/mistralai/client/models/agentscompletionrequest.py:
+    id: 3960bc4c545f
+    last_write_checksum: sha1:7f2176c96916c85ac43278f3ac23fe5e3da35aca
+    pristine_git_object: 22368e44adb1b3ecff58d2b92592710335a062b9
+  src/mistralai/client/models/agentscompletionstreamrequest.py:
+    id: 1b73f90befc2
+    last_write_checksum: sha1:8126924507b41754ec1d4a10613cf189f5ea0aea
+    pristine_git_object: 37d46c79d8964d799679413e14122a5146799eb6
+  src/mistralai/client/models/agentupdaterequest.py:
+    id: 2d5a3a437819
+    last_write_checksum: sha1:97509eeb4cd25d31a0e1f3b4de1288580cb9a5cb
+    pristine_git_object: 261ac069ce4e2b630d39080edf47bf2ad510ffb4
+  src/mistralai/client/models/apiendpoint.py:
+    id: 00b34ce0a24d
+    last_write_checksum: sha1:0a1a08e7faaa7be804de952248b4f715c942af9a
+    pristine_git_object: a6072d568e08ab1f5e010d5924794adfb2188920
+  src/mistralai/client/models/archiveftmodelout.py:
+    id: bab499599d30
+    last_write_checksum: sha1:352eb0aca8368d29ef1b68820540363e8fa69be4
+    pristine_git_object: 6108c7e153abecfc85be93b6fa1f9f22480f6d9b
+  src/mistralai/client/models/assistantmessage.py:
+    id: 2b49546e0742
+    last_write_checksum: sha1:235a0f8d14b3100f5c498a9784ddda1f824a77a9
+    pristine_git_object: 3ba14ce78e01c92458477bb025b9e5ded074fd4d
+  src/mistralai/client/models/audiochunk.py:
+    id: ce5dce4dced2
+    last_write_checksum: sha1:6d8ed87fd3f114b2b04aa15dd24d0dd5b1837215
+    pristine_git_object: 80d836f27ae65f30c6ca0e1d4d5d585bbf498cfd
+  src/mistralai/client/models/audioencoding.py:
+    id: b14e6a50f730
+    last_write_checksum: sha1:8c8d6c1da3958200bf774313c485189426439545
+    pristine_git_object: 557f53ed7a90f05e5c457f8b217d3df07e113e0b
+  src/mistralai/client/models/audioformat.py:
+    id: c8655712c218
+    last_write_checksum: sha1:baef21b264f77117bbaa1336d7efefae916b9119
+    pristine_git_object: 7ea10b3ad610aa1500fd25500ff942988ea0e1db
+  src/mistralai/client/models/audiotranscriptionrequest.py:
+    id: e4148b4d23e7
+    last_write_checksum: sha1:52c245a739864ca838d4c4ef4bdf74e7b0c60f2e
+    pristine_git_object: 78a3797882841a6fd1251d72756f6b75f6d01006
+  src/mistralai/client/models/audiotranscriptionrequeststream.py:
+    id: 33a07317a3b3
+    last_write_checksum: sha1:e468052c9ab8681ff0e1121e61aff406fc4427fc
+    pristine_git_object: 350643614e23002bc55e99e2d1807bedd80a0613
+  src/mistralai/client/models/basemodelcard.py:
+    id: 556ebdc33276
+    last_write_checksum: sha1:f524e61a160af83b20f7901afc585f61bfad6e05
+    pristine_git_object: 8ce7f139b6018c4a7358a21534532cd3e741fa8a
+  src/mistralai/client/models/batcherror.py:
+    id: 1563e2a576ec
+    last_write_checksum: sha1:239f9c44477941c45a3e7fe863828299d36267d6
+    pristine_git_object: a9c8362bfa08ab4727f08a6dd2b44a71040560f7
+  src/mistralai/client/models/batchjobin.py:
+    id: 72b25c2038d4
+    last_write_checksum: sha1:0064f199b6f27b5101f6a9abf0532f61c522e2c8
+    pristine_git_object: 39cf70b5bdf8db8adaa5c9d1dd8a227b2365879b
+  src/mistralai/client/models/batchjobout.py:
+    id: cbf1d872a46e
+    last_write_checksum: sha1:44a92b4f427b77db29294a3b6d375f8622660ee1
+    pristine_git_object: 008d43b4340cf8853fac751fb6f15525f765fe39
+  src/mistralai/client/models/batchjobsout.py:
+    id: 20b2516e7efa
+    last_write_checksum: sha1:7d4223363e861137b9bce0dc78460c732a63c90b
+    pristine_git_object: 2654dac04c126a933f6d045f43f16a30263750dc
+  src/mistralai/client/models/batchjobstatus.py:
+    id: 61e08cf5eea9
     last_write_checksum: sha1:9e042ccd0901fe4fc08fcc8abe5a3f3e1ffe9cbb
     pristine_git_object: 4b28059ba71b394d91f32dba3ba538a73c9af7a5
-  src/mistralai/models/batchrequest.py:
-    id: 6b77bb906183
-    last_write_checksum: sha1:5f4b2f5804c689e3468fe93e2b7855f2f164bbe8
-    pristine_git_object: 3d1e98f7a1162abadd37d6661841727d33dbafd7
-  src/mistralai/models/builtinconnectors.py:
-    id: 611d5b9f6fa4
+  src/mistralai/client/models/batchrequest.py:
+    id: 6f36819eeb46
+    last_write_checksum: sha1:0ce0e6982c96933e73a31c6ebfb29f78b6ebf13b
+    pristine_git_object: 24f50a9af9a74f6bec7e8903a966d114966a36d3
+  src/mistralai/client/models/builtinconnectors.py:
+    id: 2d276ce938dc
     last_write_checksum: sha1:4e94744e3854d4cdc9d1272e4f1d9371f9829a5f
     pristine_git_object: 6a3b2476d54096722eb3e7a271629d108028bd35
-  src/mistralai/models/chatclassificationrequest.py:
-    id: 7fee7b849791
-    last_write_checksum: sha1:22d8e106c165c9a16f220dc242b9165e5dcd6963
-    pristine_git_object: f06f4f34d264d5bd049ced125d8675434c4fab96
-  src/mistralai/models/chatcompletionchoice.py:
-    id: 362cbbc2f932
-    last_write_checksum: sha1:6d66a95497493bff71ed75954e7eb9965370a3a4
-    pristine_git_object: f2057ab4addf806d0458c40cb8bdf1f823da51f2
-  src/mistralai/models/chatcompletionrequest.py:
-    id: ed77c35d0007
-    last_write_checksum: sha1:e40cfe95a97a04addf2b37e6ba8df61ab3c1e199
-    pristine_git_object: ad8b542863fd4158c1966e839d4ca9992982c2f8
-  src/mistralai/models/chatcompletionresponse.py:
-    id: 227c368abb96
-    last_write_checksum: sha1:1f8d263cc3388507fcec7a0e2419d755433a1e3e
-    pristine_git_object: 3d03b1265f4c41b6e11d10edcff0e4f9fea1e434
-  src/mistralai/models/chatcompletionstreamrequest.py:
-    id: d01414c359f7
-    last_write_checksum: sha1:76c0d6dcd9d1e50208c8906f3ae29e0bea39a71b
-    pristine_git_object: 10f97e5f006c904d37aa9bb1584030196c53ed98
-  src/mistralai/models/chatmoderationrequest.py:
-    id: 9146b8de3702
-    last_write_checksum: sha1:c0465d837b1517e061036f69faa0f40464873ff6
-    pristine_git_object: 2f58d52fd00e2a1003445a1e524e3856dd8ad4c7
-  src/mistralai/models/checkpointout.py:
-    id: ee97be8b74d3
-    last_write_checksum: sha1:55cd36289696fa4da06a06812a62859bac83479f
-    pristine_git_object: aefb7731d0dfc71db4647509ef4e0ad1d70a3a95
-  src/mistralai/models/classificationrequest.py:
-    id: fbb8aaa182b6
-    last_write_checksum: sha1:300492b338cc354bee820a3b27fae7ad9900af5c
-    pristine_git_object: 8a3543785599e49df7f54069c98dedecbc545e12
-  src/mistralai/models/classificationresponse.py:
-    id: b73b192344cb
-    last_write_checksum: sha1:0fa30f6b7eba3cbf1951bd45724d99b1ff023bb1
-    pristine_git_object: b7741f373f062d552a67550dcd30e0592805ce93
-  src/mistralai/models/classificationtargetresult.py:
-    id: 718124fab7ab
-    last_write_checksum: sha1:de004f490ec6da5bee26590697a97c68d7db9168
-    pristine_git_object: 60c5a51b0a5e3f2b248f1df04ba12ec5075556eb
-  src/mistralai/models/classifierdetailedjobout.py:
-    id: aebdcce0d168
-    last_write_checksum: sha1:5d16ca3b3c375a899ee25fc9ce74d877d71b7be1
-    pristine_git_object: 701aee6e638ee8ca3e43500abce790a6f76df0c7
-  src/mistralai/models/classifierftmodelout.py:
-    id: 12437ddfc64e
-    last_write_checksum: sha1:2436c401d49eb7fa0440fca6f09045f20bb52da1
-    pristine_git_object: d2a31fae8c534b1008b96c8d4f1e22d69b85c6f3
-  src/mistralai/models/classifierjobout.py:
-    id: aa6ee49244f8
-    last_write_checksum: sha1:0c2fe0e01ccfa25686565bc836d3745313f61498
-    pristine_git_object: a2f7cc08b35152a1b56bbfbaa49f9231df651719
-  src/mistralai/models/classifiertargetin.py:
-    id: 0439c322ce64
-    last_write_checksum: sha1:92b7928166f1a0ed8a52c6ccd7523119690d9a35
-    pristine_git_object: d8a060e4896cbe9ccf27be91a44a84a3a84589f7
-  src/mistralai/models/classifiertargetout.py:
-    id: 1c9447805aaa
-    last_write_checksum: sha1:bf961d9be0bd5239032a612eb822ad8adcee6d99
-    pristine_git_object: ddc587f46a3bc78df5d88793c768431429ccf409
-  src/mistralai/models/classifiertrainingparameters.py:
-    id: 8d7d510cb1a1
-    last_write_checksum: sha1:72c19293d514c684e1bd4a432b34382f4d674e26
-    pristine_git_object: 718beeac3aa1fc2b8af52d61510f34414bcab990
-  src/mistralai/models/classifiertrainingparametersin.py:
-    id: 3da8da32eac4
-    last_write_checksum: sha1:ae5088ac22014504b3d3494db46869b87716342b
-    pristine_git_object: 9868843fbb81cc45657980b36c3c9409d386114d
-  src/mistralai/models/codeinterpretertool.py:
-    id: 8c90fc7cca85
-    last_write_checksum: sha1:d0e3832422493176bcb29b4edec0aa40c34faa12
-    pristine_git_object: 48b74ee85c897179f6f2855d6737e34031b6c0f8
-  src/mistralai/models/completionargs.py:
-    id: 6673897ce695
-    last_write_checksum: sha1:a6b22e1abc324b8adceb65cbf990c0a0ab34b603
-    pristine_git_object: 40aa0314895b5b2e9b598d05f9987d39518a6c60
-  src/mistralai/models/completionargsstop.py:
-    id: d3cf548dde2f
+  src/mistralai/client/models/chatclassificationrequest.py:
+    id: afd9cdc71834
+    last_write_checksum: sha1:84cc02714fe8ae408a526ab68c143b9b51ea5279
+    pristine_git_object: 450810225bb43bbd1539768e291840a210489f0f
+  src/mistralai/client/models/chatcompletionchoice.py:
+    id: 7e6a512f6a04
+    last_write_checksum: sha1:bc3fb866e2eb661b1619f118af459d18ba545d40
+    pristine_git_object: 5d888cfd73b82097d647f2f5ecdbdf8beee2e098
+  src/mistralai/client/models/chatcompletionrequest.py:
+    id: 9979805d8c38
+    last_write_checksum: sha1:ccd9f3908c71d6fc3ad57f41301348918b977a6f
+    pristine_git_object: 30fce28d5e071797a7180753f2825d39cfeac362
+  src/mistralai/client/models/chatcompletionresponse.py:
+    id: 669d996b8e82
+    last_write_checksum: sha1:af8071e660b09437a32482cdb25fd07096edc080
+    pristine_git_object: 60a1f561ff29c3bc28ee6aea69b60b9d47c51471
+  src/mistralai/client/models/chatcompletionstreamrequest.py:
+    id: 18cb2b2415d4
+    last_write_checksum: sha1:a067cc25d2e8c5feb146bdb0b69fb5186e77c416
+    pristine_git_object: 21dad38bb83e9b334850645ffa24e1099b121f6c
+  src/mistralai/client/models/chatmoderationrequest.py:
+    id: 057aecb07275
+    last_write_checksum: sha1:f93d1758dd8c0f123d8c52d162e3b4c8681bf121
+    pristine_git_object: 631c914d1a4f4453024665eb0a8233ec7a070332
+  src/mistralai/client/models/checkpointout.py:
+    id: 3866fe32cd7c
+    last_write_checksum: sha1:c2b57fe880c75290b100904c26afaadd356fbe88
+    pristine_git_object: 89189ed19dc521bc862da0aec1997bba0854def7
+  src/mistralai/client/models/classificationrequest.py:
+    id: 6942fe3de24a
+    last_write_checksum: sha1:3b99dba1f7383defed1254fba60433808184e8e7
+    pristine_git_object: c724ff534f60022599f34db09b517f853ae7968d
+  src/mistralai/client/models/classificationresponse.py:
+    id: eaf279db1109
+    last_write_checksum: sha1:0e09986f5db869df04601cec3793552d17e7ed04
+    pristine_git_object: 4bc21a58f0fb5b5f29357f2729250030b7d961bc
+  src/mistralai/client/models/classificationtargetresult.py:
+    id: 2445f12b2a57
+    last_write_checksum: sha1:9325f4db4e098c3bf7e24cfc487788e272a5896f
+    pristine_git_object: 89a137c374efc0f8b3ee49f3434f264705f69639
+  src/mistralai/client/models/classifierdetailedjobout.py:
+    id: d8daeb39ef9f
+    last_write_checksum: sha1:d33e6a4672b33b6092caec50cc957d98e32058f7
+    pristine_git_object: 1de4534fcb12440a004e94bc0eced7483952581d
+  src/mistralai/client/models/classifierftmodelout.py:
+    id: 2903a7123b06
+    last_write_checksum: sha1:4662ec585ade8347aeda4f020b7d31978bf8f9bb
+    pristine_git_object: a4572108674ea9c209b6224597878d5e824af686
+  src/mistralai/client/models/classifierjobout.py:
+    id: e19e9c4416cc
+    last_write_checksum: sha1:0239761cb318518641281f584783bd2b42ec3340
+    pristine_git_object: ab1e261d573a30714042af3f20ed439ddbf1d819
+  src/mistralai/client/models/classifiertargetin.py:
+    id: ed021de1c06c
+    last_write_checksum: sha1:cd1c0b8425c752815825abaedab8f4e2589cbc8f
+    pristine_git_object: 231ee21e61f8df491057767eac1450c60e8c706a
+  src/mistralai/client/models/classifiertargetout.py:
+    id: 5131f55abefe
+    last_write_checksum: sha1:4d9f66e3739f99ff1ea6f3468fe029d664541d58
+    pristine_git_object: 957104a7bcc880d84ddefe39e58969b20f36d24c
+  src/mistralai/client/models/classifiertrainingparameters.py:
+    id: 4000b05e3b8d
+    last_write_checksum: sha1:a9d4eecd716bd078065531198f5a57b189caeb79
+    pristine_git_object: 60f53c374ece9a5d336e8ab20c05c2d2c2d931f9
+  src/mistralai/client/models/classifiertrainingparametersin.py:
+    id: 4b33d5cf0345
+    last_write_checksum: sha1:f50e68c14be4655d5cf80f6c98366d32bbd01869
+    pristine_git_object: e24c9ddecf60c38e146b8f94ad35be95b3ea2609
+  src/mistralai/client/models/codeinterpretertool.py:
+    id: 950cd8f4ad49
+    last_write_checksum: sha1:533ae809df90e14e4ef6e4e993e20e37f969f39f
+    pristine_git_object: faf5b0b78f2d9981bb02eee0c28bba1fdba795b9
+  src/mistralai/client/models/completionargs.py:
+    id: 3db008bcddca
+    last_write_checksum: sha1:4b4f444b06a286098ce4e5018ffef74b3abf5b91
+    pristine_git_object: 010910f6f00a85b706a185ca5770fe70cc998905
+  src/mistralai/client/models/completionargsstop.py:
+    id: 5f339214501d
     last_write_checksum: sha1:99912f7a10e92419308cf3c112c36f023de3fc11
     pristine_git_object: de7a09564540daaa6819f06195c347b3e01914f7
-  src/mistralai/models/completionchunk.py:
-    id: d3dba36f2e47
-    last_write_checksum: sha1:e93199f69c09b0f7c5c169c90c990a7e7439b64a
-    pristine_git_object: 4d1fcfbf2e46382cc1b8bbe760efa66ceb4207b3
-  src/mistralai/models/completiondetailedjobout.py:
-    id: 7e46c1d1597b
-    last_write_checksum: sha1:4ef7f96a2ac505891fec22e4fe491ea21da67e0b
-    pristine_git_object: df41bc2ab5bf484d755d31fa132158bd1dc5b489
-  src/mistralai/models/completionevent.py:
-    id: 7d9b2ff555f0
-    last_write_checksum: sha1:268f8b79bf33e0113d1146577827fe10e47d3078
-    pristine_git_object: cc8599103944b8eebead6b315098a823e4d086e3
-  src/mistralai/models/completionftmodelout.py:
-    id: 20e6aae7163d
-    last_write_checksum: sha1:8272d246489fe8d3743d28b37b49b660ca832ea1
-    pristine_git_object: 7b6520de657363e984eef8efd870b4b841dc52e0
-  src/mistralai/models/completionjobout.py:
-    id: 36ce54765988
-    last_write_checksum: sha1:c167fae08705eccd65ec30e99046276bdcdd1b97
-    pristine_git_object: 70995d2a8e45ac5bf9a4b870d7b745e07f09856f
-  src/mistralai/models/completionresponsestreamchoice.py:
-    id: a5323819cf5b
-    last_write_checksum: sha1:dfb9c108006fc3ac0f1d0bbe8e379792f90fac19
-    pristine_git_object: 80f63987d3d41512b8a12f452aab41c97d2691b0
-  src/mistralai/models/completiontrainingparameters.py:
-    id: 701db02d1d12
-    last_write_checksum: sha1:bb6d3ca605c585e6281d85363e374923ed6ddd33
-    pristine_git_object: 0200e81c35f05863eee7753e530d9c2290c56404
-  src/mistralai/models/completiontrainingparametersin.py:
-    id: 0858706b6fc7
-    last_write_checksum: sha1:0c8735e28dc6c27bf759a6bd93e8f1cf0919b382
-    pristine_git_object: 1f74bb9da85bd721c8f11521b916ae986cd473eb
-  src/mistralai/models/contentchunk.py:
-    id: f753f1e60f3b
-    last_write_checksum: sha1:af68b3ca874420a034d7e116a67974da125d5a30
-    pristine_git_object: 47170eefb0ed04399548d254896fa616b24ec258
-  src/mistralai/models/conversationappendrequest.py:
-    id: ddbd85dab2db
-    last_write_checksum: sha1:c8ca45ad5b8340531a469e9847ee64f80c8db4c3
-    pristine_git_object: 15cbc687396ee59eee742d65e490c354fdbf0688
-  src/mistralai/models/conversationappendstreamrequest.py:
-    id: 7d9c85747963
-    last_write_checksum: sha1:ada1cbcad5ce2dd6a6bc268b30f78dc69901ff6c
-    pristine_git_object: 8cecf89d3342be9a94066716863f4fa121b29012
-  src/mistralai/models/conversationevents.py:
-    id: f543ca03cde2
-    last_write_checksum: sha1:7e6ac7ea6f4e216071af7460133b6c7791f9ce65
-    pristine_git_object: ba4c628c9de7fb85b1dcd5a47282f97df62a3730
-  src/mistralai/models/conversationhistory.py:
-    id: ab4d51ae0094
-    last_write_checksum: sha1:1d85aa48d019ce003e2d151477e0c5925bd619e7
-    pristine_git_object: d5206a571e865e80981ebfcc99e65859b0dc1ad1
-  src/mistralai/models/conversationinputs.py:
-    id: 50986036d205
+  src/mistralai/client/models/completionchunk.py:
+    id: d786b44926f4
+    last_write_checksum: sha1:e38d856ffefd3b72ff7034fa030ca0071caa0996
+    pristine_git_object: 9790db6fe35e0043f3240c0f7e8172d36dee96f5
+  src/mistralai/client/models/completiondetailedjobout.py:
+    id: 9bc38dcfbddf
+    last_write_checksum: sha1:df43d27716d99b6886a2b2a389e4c7b8c0b61630
+    pristine_git_object: 85c0c803cf809338900b7b8dcde774d731b67f8f
+  src/mistralai/client/models/completionevent.py:
+    id: c68817e7e190
+    last_write_checksum: sha1:c29f7e8a5b357e15606a01ad23e21341292b9c5e
+    pristine_git_object: 52db911eeb62ec7906b396d6936e3c7a0908bb76
+  src/mistralai/client/models/completionftmodelout.py:
+    id: 0f5277833b3e
+    last_write_checksum: sha1:d125468e84529042a19e29d1c34aef70318ddf54
+    pristine_git_object: ccecbb6a59f2994051708e66bce7ece3598a786f
+  src/mistralai/client/models/completionjobout.py:
+    id: 712e6c524f9a
+    last_write_checksum: sha1:4ca927d2eb17e2f2fe588fd22f6aaa32a4025b07
+    pristine_git_object: ecd95bb9c93412b222659e6f369d3ff7e13c8bb2
+  src/mistralai/client/models/completionresponsestreamchoice.py:
+    id: 5969a6bc07f3
+    last_write_checksum: sha1:aa04c99a8bca998752b44fc3e2f2d5e24434a9bf
+    pristine_git_object: 1b8d6faccbe917aaf751b4efa676bf51c1dcd3ff
+  src/mistralai/client/models/completiontrainingparameters.py:
+    id: be202ea0d5a6
+    last_write_checksum: sha1:fa4a0f44afeb3994c9273c5b4c9203eef810b957
+    pristine_git_object: 36b285ab4f41209c71687a14c8650c0db52e165f
+  src/mistralai/client/models/completiontrainingparametersin.py:
+    id: 0df22b873b5f
+    last_write_checksum: sha1:109503fabafd24174c671f2caa0566af2d46800e
+    pristine_git_object: d0315d9984575cb6c02bc6e38cedde3deef77b9a
+  src/mistralai/client/models/contentchunk.py:
+    id: c007f5ee0325
+    last_write_checksum: sha1:a319b67206f4d0132544607482e685b46e2dce8c
+    pristine_git_object: 0a25423f9f9a95ced75d817ad7712747ce0915ae
+  src/mistralai/client/models/conversationappendrequest.py:
+    id: 81ce529e0865
+    last_write_checksum: sha1:4f38d4aa2b792b113ef34ce54df3ac9b2efca5e1
+    pristine_git_object: 867c0a414c1340033af7f6d03ea8cef2dcb8ff4a
+  src/mistralai/client/models/conversationappendstreamrequest.py:
+    id: 27ada745e6ad
+    last_write_checksum: sha1:41dcb9467d562bcc8feb885a56f73ac8d013c2d8
+    pristine_git_object: f51407bf2a363f705b0b61ed7be4ef6249525af5
+  src/mistralai/client/models/conversationevents.py:
+    id: 8c8b08d853f6
+    last_write_checksum: sha1:e0d920578ca14fa186b3efeee69ed03f7a2aa119
+    pristine_git_object: 308588a1f094631935e4229f5538c5092f435d2c
+  src/mistralai/client/models/conversationhistory.py:
+    id: 60a51ff1682b
+    last_write_checksum: sha1:ed60e311224c3ada9c3768335394a5b338342433
+    pristine_git_object: 40bd1e7220160f54b0ab938b3627c77fb4d4f9ef
+  src/mistralai/client/models/conversationinputs.py:
+    id: 711b769f2c40
     last_write_checksum: sha1:3e8c4650808b8059c3a0e9b1db60136ba35942df
     pristine_git_object: 4d30cd76d14358e12c3d30c22e3c95078ecde4bd
-  src/mistralai/models/conversationmessages.py:
-    id: be3ced2d07e7
-    last_write_checksum: sha1:410317f1b45f395faa66a9becd7bb2398511ba60
-    pristine_git_object: 32ca9c20cb37ff65f7e9b126650a78a4b97e4b56
-  src/mistralai/models/conversationrequest.py:
-    id: ceffcc288c2d
-    last_write_checksum: sha1:c4c62ef9cdf9bb08463bcb12919abd98ceb8d344
-    pristine_git_object: 80581cc10a8e7555546e38c8b7068a2744eb552b
-  src/mistralai/models/conversationresponse.py:
-    id: 016ec02abd32
-    last_write_checksum: sha1:37c3f143b83939b369fe8637932974d163da3c37
-    pristine_git_object: ff318e35ee63e43c64e504301236327374442a16
-  src/mistralai/models/conversationrestartrequest.py:
-    id: 2a8207f159f5
-    last_write_checksum: sha1:93cd4370afe6a06b375e0e54ca09225e02fc42d3
-    pristine_git_object: 6f21d01267481b8b47d4d37609ac131c34c10a9b
-  src/mistralai/models/conversationrestartstreamrequest.py:
-    id: d98d3e0c8eed
-    last_write_checksum: sha1:90f295ce27ba55d58899e06a29af223a464f5a4c
-    pristine_git_object: 2cec7958ab31378d480f0f93a5ed75ac8c624442
-  src/mistralai/models/conversationstreamrequest.py:
-    id: f7051f125d44
-    last_write_checksum: sha1:12bc85a14f110f5c8a3149540668bea178995fae
-    pristine_git_object: 1a481b77f706db7101521756c7c3476eaa1918c5
-  src/mistralai/models/conversationusageinfo.py:
-    id: 922894aa994b
-    last_write_checksum: sha1:0e0039421d7291ecbbf820ea843031c50371dd9e
-    pristine_git_object: 9ae6f4fb6a7b4fd056c677c2152625de422b490a
-  src/mistralai/models/delete_model_v1_models_model_id_deleteop.py:
-    id: 409899d6ca23
-    last_write_checksum: sha1:2d1e5b8947b56abba06363358973032e196c8139
-    pristine_git_object: 4acb8d5373f25d7200378d0b8a767451978aa5a9
-  src/mistralai/models/deletefileout.py:
-    id: d51d0de32738
-    last_write_checksum: sha1:da9e95bb804820dea4977f65f62c08e491d9bb4b
-    pristine_git_object: 2b346ec4879c8811f824c7e6bde9fef922f37382
-  src/mistralai/models/deletemodelout.py:
-    id: 8dcf3427f17b
-    last_write_checksum: sha1:8243b0bcf735a67d4cffb254fe9de95f130a0d8a
-    pristine_git_object: c1b1effcbe3b093f7dede49684cf88aa0a9b27a7
-  src/mistralai/models/deltamessage.py:
-    id: 43ee8a48546e
-    last_write_checksum: sha1:8bc50b7943d5ae4725eb57b7ca21a4c1217e4c0d
-    pristine_git_object: 88aefe7f652296c02377714586d38b8e318a419d
-  src/mistralai/models/documentlibrarytool.py:
-    id: 24c1c0293181
-    last_write_checksum: sha1:7ec74875595149f433ee1b8a95d8183aa1cf8738
-    pristine_git_object: 8d4c122b0412682a792c754a06e10809bfd8c25c
-  src/mistralai/models/documentout.py:
-    id: 205cb7721dfa
-    last_write_checksum: sha1:9316ed725bd9d7a2ef1f4e856f61def684442bd7
-    pristine_git_object: 81d9605f38e40a703911fefc15731ec102c74ccb
-  src/mistralai/models/documenttextcontent.py:
-    id: 685680d8640b
-    last_write_checksum: sha1:dafce4998fa5964ac6833e71f7cb4f23455c14e6
-    pristine_git_object: c02528c2052d535f7c815fb1165df451d49fef79
-  src/mistralai/models/documentupdatein.py:
-    id: 6d69a91f40bd
-    last_write_checksum: sha1:dcbc51f1a1192bb99732405420e57fedb32dd1de
-    pristine_git_object: bd89ff4793e4fd78a4bae1c9f5aad716011ecbfd
-  src/mistralai/models/documenturlchunk.py:
-    id: 34a86f25f54f
-    last_write_checksum: sha1:1496b3d587fd2c5dc1c3f18de1ac59a29c324849
-    pristine_git_object: 6d0b1dc6c9f6ebca8638e0c8991a9aa6df2b7e48
-  src/mistralai/models/embeddingdtype.py:
-    id: bca8ae3779ed
+  src/mistralai/client/models/conversationmessages.py:
+    id: 011c39501c26
+    last_write_checksum: sha1:f71e85febab797d5c17b58ef8a1318545c974ed2
+    pristine_git_object: 1ea05369b95fdaa7d7ae75398669f88826e5bb26
+  src/mistralai/client/models/conversationrequest.py:
+    id: 58e3ae67f149
+    last_write_checksum: sha1:20339231abbf60fb160f2dc24941860304c702fd
+    pristine_git_object: e3211c4c7b20c162473e619fad6dc0c6cea6b571
+  src/mistralai/client/models/conversationresponse.py:
+    id: ad7a8472c7bf
+    last_write_checksum: sha1:50fdea156c2f2ce3116d41034094c071a3e136fa
+    pristine_git_object: 32d0f28f101f51a3ca79e4d57f4913b1c420b189
+  src/mistralai/client/models/conversationrestartrequest.py:
+    id: 681d90d50514
+    last_write_checksum: sha1:76c5393b280e263a38119d98bdcac917afe36881
+    pristine_git_object: aa2bf7b0dcdf5e343a47787c4acd00fe3f8bd405
+  src/mistralai/client/models/conversationrestartstreamrequest.py:
+    id: 521c2b5bfb2b
+    last_write_checksum: sha1:5ba78bf9048b1e954c45242f1843eb310b306a94
+    pristine_git_object: 689815ebcfe577a1698938c9ccbf100b5d7995f8
+  src/mistralai/client/models/conversationstreamrequest.py:
+    id: 58d633507527
+    last_write_checksum: sha1:9cb79120c78867e12825ac4d504aa55ee5827168
+    pristine_git_object: 219230a2a8dd7d42cc7f5613ca22cec5fa872750
+  src/mistralai/client/models/conversationusageinfo.py:
+    id: 6685e3b50b50
+    last_write_checksum: sha1:7fa37776d7f7da6b3a7874c6f398d6f607c01b52
+    pristine_git_object: 7a818c89a102fe88eebc8fec78a0e195e26cf85d
+  src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py:
+    id: 767aba526e43
+    last_write_checksum: sha1:9a8f9917fc5de154e8a6fdb44a1dd7db55bb1de5
+    pristine_git_object: 1cd36128a231a6d4be328fde53d1f048ff7c2ccd
+  src/mistralai/client/models/deletefileout.py:
+    id: 5578701e7327
+    last_write_checksum: sha1:76d209f8b3bba5e4bc984700fe3d8981c9d6142b
+    pristine_git_object: b25538bee35dedaae221ea064defb576339402c8
+  src/mistralai/client/models/deletemodelout.py:
+    id: ef6a1671c739
+    last_write_checksum: sha1:ef2f6774eaf33c1c78368cd92bc4108ecccd9a6c
+    pristine_git_object: 5aa8b68fe3680d3b51127d6a6b6068b1303756e8
+  src/mistralai/client/models/deltamessage.py:
+    id: 68f53d67a140
+    last_write_checksum: sha1:52296fa6d7fc3788b64dcb47aadd0818bcb86e11
+    pristine_git_object: 0ae56da86f645e5a0db2a0aa4579342610243300
+  src/mistralai/client/models/documentlibrarytool.py:
+    id: 3eb3c218f457
+    last_write_checksum: sha1:af01ec63a1c5eb7c332b82b3ec1d3553891614c2
+    pristine_git_object: 861a58d38125ca5af11772ebde39a7c57c39ad9c
+  src/mistralai/client/models/documentout.py:
+    id: 7a85b9dca506
+    last_write_checksum: sha1:2de0e0f9be3a2362fbd7a49ff664b43e4c29a262
+    pristine_git_object: 39d0aa2a5a77d3eb3349ae5e7b02271c2584fe56
+  src/mistralai/client/models/documenttextcontent.py:
+    id: e730005e44cb
+    last_write_checksum: sha1:ad7e836b5f885d703fd5f09c09aba0628d77e05b
+    pristine_git_object: b1c1aa073dff4dcdc59d070058221b67ce9e36f9
+  src/mistralai/client/models/documentupdatein.py:
+    id: d19c1b26a875
+    last_write_checksum: sha1:bad1cee0906961f555784e03c23f345194959077
+    pristine_git_object: 02022b89ef2b87349e0d1dc4cccc3d1908a2d1aa
+  src/mistralai/client/models/documenturlchunk.py:
+    id: 4309807f6048
+    last_write_checksum: sha1:1253bdbe1233481622b76e340413ffb1d8996f0e
+    pristine_git_object: 00eb55357f19ac4534446e0ee761bdbccfb471e2
+  src/mistralai/client/models/embeddingdtype.py:
+    id: 77f9526a78df
     last_write_checksum: sha1:962f629fa4ee8a36e731d33f8f730d5741a9e772
     pristine_git_object: 26eee779e12ae8114a90d3f18f99f3dd50e46b9e
-  src/mistralai/models/embeddingrequest.py:
-    id: ccb2b16068c8
-    last_write_checksum: sha1:bf7877e386362d6187ffb284a1ceee1dea4cc5b7
-    pristine_git_object: 44797bfad1b76ba809fab3791bffa2c78791e27b
-  src/mistralai/models/embeddingresponse.py:
-    id: c38279b9f663
-    last_write_checksum: sha1:369740f705b08fede21edc04adf86505e55c9b76
-    pristine_git_object: aae6fa60e131d4378bc631576b18f4d8a47f2770
-  src/mistralai/models/embeddingresponsedata.py:
-    id: b73c5696eb71
-    last_write_checksum: sha1:9709503bdde0a61603237fe6e84c410467e7e9f4
-    pristine_git_object: 01e2765fb206b0ee36dfeb51cf3066613c74ac13
-  src/mistralai/models/encodingformat.py:
-    id: 9f4fad7d5a9e
+  src/mistralai/client/models/embeddingrequest.py:
+    id: eadbe3f9040c
+    last_write_checksum: sha1:c4f85f5b768afb0e01c9a9519b58286804cfbd6b
+    pristine_git_object: 1dfe97c8fa2162719d2a68e7a0ef2f348efa1f88
+  src/mistralai/client/models/embeddingresponse.py:
+    id: f7d790e84b65
+    last_write_checksum: sha1:285531abf3a45de3193ed3c8b07818faac97eb32
+    pristine_git_object: 64a28ea9f1c57ed6e69e1d49c5c83f63fa38fd36
+  src/mistralai/client/models/embeddingresponsedata.py:
+    id: 6d6ead6f3803
+    last_write_checksum: sha1:ed821591832ebfa03acd0ce0a3ca5a0521e6fa53
+    pristine_git_object: ebd0bf7b29e0a1aee442337fd02ce562fb2c5a3d
+  src/mistralai/client/models/encodingformat.py:
+    id: b51ec296cc92
     last_write_checksum: sha1:f9a3568cd008edb02f475a860e5849d9a40d0246
     pristine_git_object: be6c1a14e4680e24e70b8bbda018759056b784ca
-  src/mistralai/models/entitytype.py:
-    id: 4d056950d537
-    last_write_checksum: sha1:7087fb7ad2886188380cd692997b2850c950a6b8
-    pristine_git_object: 8d2d4bbe837da3e21988548e09710ab629d1aacd
-  src/mistralai/models/eventout.py:
-    id: 2601c7113273
-    last_write_checksum: sha1:93ba178c3f6459dbc638e49c3eddcc188c7ff5d0
-    pristine_git_object: 3281903429b154eb095a7c41b1751cfef97e497d
-  src/mistralai/models/file.py:
-    id: 7c1aa0c610c0
-    last_write_checksum: sha1:3735ec925554b397e36fd2322062f555fbcde270
-    pristine_git_object: 682d7f6e24b736dabd0566ab1b45b20dae5ea019
-  src/mistralai/models/filechunk.py:
-    id: ea6a1ad435e8
-    last_write_checksum: sha1:56d91860c1c91c40662313ea6f156db886bb55b6
-    pristine_git_object: 83e60cef29045ced5ae48b68481bce3317690b8e
-  src/mistralai/models/filepurpose.py:
-    id: 3928b3171a09
-    last_write_checksum: sha1:2ffb9fd99624b7b9997f826526045a9a956fde14
-    pristine_git_object: b109b35017d5aa086ac964d78163f41e64277874
-  src/mistralai/models/files_api_routes_delete_fileop.py:
-    id: fa02d4d126c7
-    last_write_checksum: sha1:c96b106d6496087673f6d1b914e748c49ec13755
-    pristine_git_object: a84a7a8eee4b6895bb2e835f82376126b3e423ec
-  src/mistralai/models/files_api_routes_download_fileop.py:
-    id: 1dc2e2823a00
-    last_write_checksum: sha1:6001bcf871ab76635abcb3f081b029c8154a191e
-    pristine_git_object: 168a7fa6701578b77876fe0bddeb1003d06f33b7
-  src/mistralai/models/files_api_routes_get_signed_urlop.py:
-    id: 628ed2f82ce4
-    last_write_checksum: sha1:c970025b1e453ad67298d12611542abb46ded54d
-    pristine_git_object: 708d40ab993f93227b9795c745383ab954c1c89c
-  src/mistralai/models/files_api_routes_list_filesop.py:
-    id: 865dd74c577c
-    last_write_checksum: sha1:d75afa1ee7e34cbcfb8da78e3b5c9384b684b89b
-    pristine_git_object: 84d61b9b4d7032a60e3055b683a396e53b625274
-  src/mistralai/models/files_api_routes_retrieve_fileop.py:
-    id: d821f72ee198
-    last_write_checksum: sha1:d0d07123fd941bb99a00a36e87bc7ab4c21506a6
-    pristine_git_object: 0c2a95ef590f179fe60a19340e34adb230dd8901
-  src/mistralai/models/files_api_routes_upload_fileop.py:
-    id: ccca25a2fe91
-    last_write_checksum: sha1:64b1d3c3fe9323d40096798760c546dc1c30a57d
-    pristine_git_object: aeefe842b327c89c0a78ba3d6e4a1ccb8d4a25fe
-  src/mistralai/models/fileschema.py:
-    id: 8a02ff440be5
-    last_write_checksum: sha1:55120d1d9322e9381d92f33b23597f5ed0e20e4c
-    pristine_git_object: 9a88f1bbdf34ffb619794be9c041635ff333e489
-  src/mistralai/models/filesignedurl.py:
-    id: 6fe55959eedd
-    last_write_checksum: sha1:afbe1cdfbdf2f760fc996a5065c70fa271a35885
-    pristine_git_object: 092be7f8090272bdebfea6cbda7b87d9877d59e8
-  src/mistralai/models/fimcompletionrequest.py:
-    id: a54284b7041a
-    last_write_checksum: sha1:7e477e032b3a48fe08610dd5dc50dee0948950e9
-    pristine_git_object: 801a358b02441b7537f4bae64e93b4308c720040
-  src/mistralai/models/fimcompletionresponse.py:
-    id: 15f25c04c5dd
-    last_write_checksum: sha1:b7787a7dc82b31ed851a52ae2f0828cc8746d61e
-    pristine_git_object: f27972b9e6e2f9dc7837be7278fda4910755f1f4
-  src/mistralai/models/fimcompletionstreamrequest.py:
-    id: ba6b92828dc7
-    last_write_checksum: sha1:a8f2c6cbd5a41ad85b7d0faced90d8f05b29f646
-    pristine_git_object: 2e8e6db2a21a86ffd7cc61f92fed5c55f19e2e50
-  src/mistralai/models/finetuneablemodeltype.py:
-    id: cbd439e85b18
+  src/mistralai/client/models/entitytype.py:
+    id: 62d6a6a13288
+    last_write_checksum: sha1:baefd3e820f1682bbd75ab195d1a47ccb3d16a19
+    pristine_git_object: 9c16f4a1c0e61f8ffaee790de181572891db3f89
+  src/mistralai/client/models/eventout.py:
+    id: da8ad645a9cb
+    last_write_checksum: sha1:326b575403d313c1739077ad6eb9047ded15a6f5
+    pristine_git_object: 5e118d4599e935bcd6196a7cbc1baae8f4a82752
+  src/mistralai/client/models/file.py:
+    id: f972c39edfcf
+    last_write_checksum: sha1:40ddf9b7e6d3e9a77899cd9d32a9ac921c531c87
+    pristine_git_object: a8bbc6fab46a49e7171cabbef143a9bbb48e763c
+  src/mistralai/client/models/filechunk.py:
+    id: ff3c2d33ab1e
+    last_write_checksum: sha1:9ae8d68bfcb6695cce828af08e1c9a9ce779f1f3
+    pristine_git_object: d8b96f69285ea967397813ae53722ca38e8d6443
+  src/mistralai/client/models/filepurpose.py:
+    id: a11e7f9f2d45
+    last_write_checksum: sha1:154a721dbd5e0c951757a596a96e5d880ecf4982
+    pristine_git_object: eef1b08999956fd45fe23f2c03bb24546207b4e3
+  src/mistralai/client/models/files_api_routes_delete_fileop.py:
+    id: 2f385cc6138f
+    last_write_checksum: sha1:e7b7ad30a08b1033ecd5433da694f69a91029bfc
+    pristine_git_object: b71748669906990998cc79345f789ed50865e110
+  src/mistralai/client/models/files_api_routes_download_fileop.py:
+    id: 8184ee3577c3
+    last_write_checksum: sha1:7781932cc271d47a2965217184e1dd35a187de3f
+    pristine_git_object: fa9e491a95625dbedde33bc9ea344aaebf992902
+  src/mistralai/client/models/files_api_routes_get_signed_urlop.py:
+    id: 0a1a18c6431e
+    last_write_checksum: sha1:797201cde755cf8e349b71dc2ff7ce56d1eabb73
+    pristine_git_object: a05f826232396957a3f65cb1c38c2ae13944d43b
+  src/mistralai/client/models/files_api_routes_list_filesop.py:
+    id: b2e92f2a29b4
+    last_write_checksum: sha1:711cc470b8dedefd2c2c7e2ae7dfa6c4601e0f30
+    pristine_git_object: ace996318446667b2da3ca2d37bd2b25bcfbb7a7
+  src/mistralai/client/models/files_api_routes_retrieve_fileop.py:
+    id: 5d5dbb8d5f7a
+    last_write_checksum: sha1:ea34337ee17bdb99ad89c0c6742fb80cb0b67c13
+    pristine_git_object: 4a9678e5aa7405cbe09f59ffbdb6c7927396f06a
+  src/mistralai/client/models/files_api_routes_upload_fileop.py:
+    id: f13b84de6fa7
+    last_write_checksum: sha1:3dc679de7b41abb4b0710ade631e818621b6f3bc
+    pristine_git_object: 723c6cc264613b3670ac999829e66131b8424849
+  src/mistralai/client/models/fileschema.py:
+    id: 19cde41ca32a
+    last_write_checksum: sha1:29fe7d4321fc2b20ae5fa349f30492aeb155c329
+    pristine_git_object: 9ecde454f0dac17997ef75e5cdb850cccc8020fe
+  src/mistralai/client/models/filesignedurl.py:
+    id: a1754c725163
+    last_write_checksum: sha1:0987cc364694efd61c62ba15a57cfb74aa0d0cc8
+    pristine_git_object: cbca9847568ab7871d05b6bb416f230d3c9cddfc
+  src/mistralai/client/models/fimcompletionrequest.py:
+    id: cf3558adc3ab
+    last_write_checksum: sha1:a62845c9f60c8d4df4bfaa12e4edbb39dcc5dcb7
+    pristine_git_object: c9eca0af3ccacfd815bfb8b11768e289b4828f4e
+  src/mistralai/client/models/fimcompletionresponse.py:
+    id: b860d2ba771e
+    last_write_checksum: sha1:00b5b7146932f412f8230da7164e5157d267a817
+    pristine_git_object: 8a2eda0ced48f382b79e5c6d7b64b0c5f0b16c15
+  src/mistralai/client/models/fimcompletionstreamrequest.py:
+    id: 1d1ee09f1913
+    last_write_checksum: sha1:9260ae9a12c37b23d7dfa8ec6d3029d1d8a133ed
+    pristine_git_object: 2954380238dec5540e321012b8aa6609e404114c
+  src/mistralai/client/models/finetuneablemodeltype.py:
+    id: 05e097395df3
     last_write_checksum: sha1:8694f016e8a4758308225b92b57bee162accf9d7
     pristine_git_object: f5b8b2ed45b56d25b387da44c398ae79f3a52c73
-  src/mistralai/models/ftclassifierlossfunction.py:
-    id: 95255316968d
+  src/mistralai/client/models/ftclassifierlossfunction.py:
+    id: d21e2a36ab1f
     last_write_checksum: sha1:69e08ab728e095b8e3846ed2dc142aa1e82a864a
     pristine_git_object: c4ef66e0fe69edace4912f2708f69a6e606c0654
-  src/mistralai/models/ftmodelcapabilitiesout.py:
-    id: 1bc9230e1852
-    last_write_checksum: sha1:c841f76ba219c82e3324b69ad8eba4abd522d0b9
-    pristine_git_object: 7f3aa18b982c11fb6463e96333250b632dd195c8
-  src/mistralai/models/ftmodelcard.py:
-    id: 4f25bcf18e86
-    last_write_checksum: sha1:f1d80e6aa664e63b4a23a6365465d42415fc4bbb
-    pristine_git_object: 1c3bd04da0cc2bc86bec97d7890ad6594879b334
-  src/mistralai/models/function.py:
-    id: 66b7b7ab8fc4
-    last_write_checksum: sha1:5da05a98ca5a68c175bd212dd41127ef98013da6
-    pristine_git_object: 7d40cf758ffbb3b6b4e62b50274829bd1c809a9c
-  src/mistralai/models/functioncall.py:
-    id: 5e03760bb753
-    last_write_checksum: sha1:20d2a8196b6ccaffe490b188b1482a309b2dce79
-    pristine_git_object: 0cce622a4835fcbd9425928b115a707848c65f54
-  src/mistralai/models/functioncallentry.py:
-    id: 1d5c6cef6e92
-    last_write_checksum: sha1:f357b1fde226c52c0dc2b105df66aeb6d17ab1bf
-    pristine_git_object: 4ea62c4ffc671b20d35cd967f3da0f1a34c92e2e
-  src/mistralai/models/functioncallentryarguments.py:
-    id: bd63a10181da
+  src/mistralai/client/models/ftmodelcapabilitiesout.py:
+    id: f70517be97d4
+    last_write_checksum: sha1:44260fefae93bc44a099ff64eeae7657c489005c
+    pristine_git_object: be31aa3c14fb8fe9154ad8f54e9bf43f586951c7
+  src/mistralai/client/models/ftmodelcard.py:
+    id: c4f15eed2ca2
+    last_write_checksum: sha1:a6a71ce4a89688cb4780697e299a4274f7323e24
+    pristine_git_object: 36cb723df8bcde355e19a55105932298a8e2e33a
+  src/mistralai/client/models/function.py:
+    id: 32275a9d8fee
+    last_write_checksum: sha1:f98db69c2fb49bbd6cff36fb4a25e348db6cd660
+    pristine_git_object: 6e2b52edbd8d7cb6f7654eb76b7ca920636349cf
+  src/mistralai/client/models/functioncall.py:
+    id: 393fca552632
+    last_write_checksum: sha1:ef22d048ddb5390f370fcf3405f4d46fa82ed574
+    pristine_git_object: 6cb6f26e6c69bc134bcb45f53156e15e362b8a63
+  src/mistralai/client/models/functioncallentry.py:
+    id: cd058446c0aa
+    last_write_checksum: sha1:661372b1ff4505cf7039ece11f12bb1866688bed
+    pristine_git_object: fce4d387df89a9fa484b0c7cc57556ea13278469
+  src/mistralai/client/models/functioncallentryarguments.py:
+    id: 3df3767a7b93
     last_write_checksum: sha1:6beb9aca5bfc2719f357f47a5627c9edccef051f
     pristine_git_object: ac9e6227647b28bfd135c35bd32ca792d8dd414b
-  src/mistralai/models/functioncallevent.py:
-    id: 868025c914c8
-    last_write_checksum: sha1:4eb5b07218c9ab923cbe689e3de116d14281a422
-    pristine_git_object: e3992cf173907a485ced9ec12323a680613e9e6a
-  src/mistralai/models/functionname.py:
-    id: 46a9b195fef5
-    last_write_checksum: sha1:2219be87b06033dad9933b2f4efd99a4758179f1
-    pristine_git_object: 0a6c0b1411b6f9194453c9fe22d52d035eb80c4f
-  src/mistralai/models/functionresultentry.py:
-    id: d617bbe28e36
-    last_write_checksum: sha1:a781805577eb871b4595bae235c1d25e2e483fdc
-    pristine_git_object: 1c61395a82830dc689f2e011b9e6c86eba58cda3
-  src/mistralai/models/functiontool.py:
-    id: e1b3d619ef0b
-    last_write_checksum: sha1:31e375a2222079e9e70459c55ff27a8b3add869d
-    pristine_git_object: 009fe28008a166d551566378e3c2730963aca591
-  src/mistralai/models/githubrepositoryin.py:
-    id: e7f21180a768
-    last_write_checksum: sha1:b4f630e15057e4ff8bfc5fb7ba2f0085a76c5f06
-    pristine_git_object: b16ce0d2898b000f08e3d960a3411941a2324473
-  src/mistralai/models/githubrepositoryout.py:
-    id: a3e494bbd813
-    last_write_checksum: sha1:00a9bc4d6308cd960077fb639b1778723a71f583
-    pristine_git_object: 372477c106a37b1b9d5cec02751c63fb08abcf53
-  src/mistralai/models/httpvalidationerror.py:
-    id: 224ee4b3f0f0
-    last_write_checksum: sha1:3f8d51b670993863fcd17421d1ace72e8621fd51
-    pristine_git_object: d467577af04921f5d9bfa906ae6f4e06055a8785
-  src/mistralai/models/imagegenerationtool.py:
-    id: 63bbe395acb2
-    last_write_checksum: sha1:404e9cbabada212b87cc2e0b8799a18ff1cecf95
-    pristine_git_object: a92335dbd2d0d03be5c2df4132df1cc26eaf38dd
-  src/mistralai/models/imageurl.py:
-    id: 20116779b5a0
-    last_write_checksum: sha1:2d6090577370f5eb2e364029a11bb61bd86ef226
-    pristine_git_object: 6f077b69019fbc598ddc402ba991c83f8a047632
-  src/mistralai/models/imageurlchunk.py:
-    id: 0a6e87c96993
-    last_write_checksum: sha1:0b7e4c0d5129698b1b01608eb59b27513f6a9818
-    pristine_git_object: 8e8aac4238381527d9156fcb72288b28a82f9689
-  src/mistralai/models/inputentries.py:
-    id: cbf378d5b92a
+  src/mistralai/client/models/functioncallevent.py:
+    id: 23b120b8f122
+    last_write_checksum: sha1:535874a4593ce1f40f9683fa85159e4c4274f3ee
+    pristine_git_object: 4e040585285985cebc7e26ac402b6df8f4c063bb
+  src/mistralai/client/models/functionname.py:
+    id: 000acafdb0c0
+    last_write_checksum: sha1:03d7b26a37311602ae52a3f6467fe2c306c468c1
+    pristine_git_object: 2a05c1de42a6ff5775af5509c106eaa7b391778e
+  src/mistralai/client/models/functionresultentry.py:
+    id: 213df39bd5e6
+    last_write_checksum: sha1:7e6d951cfd333f9677f4c651054f32658794cc48
+    pristine_git_object: a843bf9bdd82b5cf3907e2172ed793a391c5cba2
+  src/mistralai/client/models/functiontool.py:
+    id: 2e9ef5800117
+    last_write_checksum: sha1:8ab806567a2ab6c2e04cb4ce394cbff2ae7aad50
+    pristine_git_object: 74b50d1bcd2bc0af658bf5293c8cc7f328644fa1
+  src/mistralai/client/models/githubrepositoryin.py:
+    id: eef26fbd2876
+    last_write_checksum: sha1:3b64fb4f34e748ef71fa92241ecdd1c73aa9485a
+    pristine_git_object: e56fef9ba187792238991cc9373a7d2ccf0b8c0d
+  src/mistralai/client/models/githubrepositoryout.py:
+    id: d2434a167623
+    last_write_checksum: sha1:d2be5c474d3a789491cad50b95e3f25933b0c66a
+    pristine_git_object: e3aa9ebc52e8613b15e3ff92a03593e2169dc935
+  src/mistralai/client/models/httpvalidationerror.py:
+    id: 4099f568a6f8
+    last_write_checksum: sha1:81432fd45c6faac14a6b48c6d7c85bbc908b175c
+    pristine_git_object: 34d9b54307db818e51118bc448032e0476688a35
+  src/mistralai/client/models/imagegenerationtool.py:
+    id: e1532275faa0
+    last_write_checksum: sha1:7eaea320c1b602df2e761405644361820ca57d33
+    pristine_git_object: e09dba81314da940b2be64164e9b02d51e72f7b4
+  src/mistralai/client/models/imageurl.py:
+    id: e4bbf5881fbf
+    last_write_checksum: sha1:d300e69742936f6e6583f580091827ada7da6c20
+    pristine_git_object: 6e61d1ae2ec745774345c36e605748cf7733687b
+  src/mistralai/client/models/imageurlchunk.py:
+    id: 746fde62f637
+    last_write_checksum: sha1:2311445f8c12347eab646f1b9ff7c4202642c907
+    pristine_git_object: f967a3c8ced6d5fb4b274454100134e41c5b7a5c
+  src/mistralai/client/models/inputentries.py:
+    id: 44727997dacb
     last_write_checksum: sha1:afc03830974af11516c0b997f1cd181218ee4fb0
     pristine_git_object: 8ae29837a6c090fbe1998562684d3a372a9bdc31
-  src/mistralai/models/inputs.py:
-    id: a53031bc9cb6
-    last_write_checksum: sha1:94290a72cb6cfa40813bc79a66a463978ae9ae1c
-    pristine_git_object: 34d20f3428a5d994c4a199c411dc8097b3c259d7
-  src/mistralai/models/instructrequest.py:
-    id: d23d1da148c8
-    last_write_checksum: sha1:2c4f4babc9944f90bc725bb0c460c8de85b3d75e
-    pristine_git_object: dddbda00a418629462e3df12a61a6b1c56c1d2bd
-  src/mistralai/models/jobin.py:
-    id: 42f6df34c72e
-    last_write_checksum: sha1:e5a78c9a2cd48fb1d7d062ec2f8d54f8d3ac493e
-    pristine_git_object: aa0cd06c704902919f672e263e969630df783ef6
-  src/mistralai/models/jobmetadataout.py:
-    id: eaa2e54e2e2b
-    last_write_checksum: sha1:90afd144e2f9ec77c3be2694db1d96e4bc23fecb
-    pristine_git_object: 10ef781ebbba4c5eaab6f40f5d5f9f828944c983
-  src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py:
-    id: 5d3a14d60da7
-    last_write_checksum: sha1:4925f408587e91581c0181baf9acd1dcb5a50768
-    pristine_git_object: 5b83d534d7efd25c0bc47406c79dfd59e22ec1d6
-  src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py:
-    id: 74c718778882
-    last_write_checksum: sha1:92a89c2d0384b2251636a61113310c84da0001bf
-    pristine_git_object: 9bfaf9c5230e4a1cc0187faeedc78ebcaaf38b98
-  src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py:
-    id: 072c77cfbaa5
-    last_write_checksum: sha1:f890bc21fa71e33a930d48cdbf18fd503419406c
-    pristine_git_object: c48246d54c696bd85fbe67348d5eef1a2a1944db
-  src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py:
-    id: db002a822be0
-    last_write_checksum: sha1:3a1019f200193556df61cbe3786b03c2dbab431f
-    pristine_git_object: d728efd175f1df6b59b74d0b2fa602c0e0199897
-  src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py:
-    id: ad69f51c764d
-    last_write_checksum: sha1:c84477987738a389ddf88546060263ecfb46506a
-    pristine_git_object: ceb19a69131958a2de6c3e678c40a1ca5d35fd73
-  src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py:
-    id: a5c2c6e89b85
-    last_write_checksum: sha1:dfb755d386e7c93540f42392f18efae7f61c4625
-    pristine_git_object: 39af3ea6fab66941faf7718d616ff2a386e8219b
-  src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py:
-    id: 221ec5d0482f
-    last_write_checksum: sha1:f2ce2c6a8924deda372d749ea2a09a2526b8da44
-    pristine_git_object: be99dd2d329f5921513ba3ad6e5c5a9807d1a363
-  src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py:
-    id: bd0fd94f34fc
-    last_write_checksum: sha1:48390cf76ffc1d712e33bd0bcece8dea956e75cb
-    pristine_git_object: 9aec8eb25c54e8fecedd9dd9e823ccf32c1a36b8
-  src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py:
-    id: cba224459ae6
-    last_write_checksum: sha1:238eeb9b7f48ff4e3262cc0cc5e55d96fe565073
sha1:238eeb9b7f48ff4e3262cc0cc5e55d96fe565073 - pristine_git_object: 8103b67b55eab0f9197cd9fb421e6ea4ca10e76e - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: - id: ecc5a3420980 - last_write_checksum: sha1:8e026bc610fead1e55886c741f6b38817bb6b2ff - pristine_git_object: a84274ff5b2c45f2adc2c0234db090c498decc51 - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: - id: 3e8d8e70d526 - last_write_checksum: sha1:a5538fcb4248fd83749dc303f9585d7354ff8b92 - pristine_git_object: a10528ca0f7056ef82e0aeae8f4262c65e47791d - src/mistralai/models/jobsout.py: - id: bb1000b03e73 - last_write_checksum: sha1:d06d7b33e5630d45795efc2a8443ae3070866b07 - pristine_git_object: 680b1d582bc8fbce17a381be8364333dd87ce333 - src/mistralai/models/jsonschema.py: - id: 4bcf195c31bb - last_write_checksum: sha1:a0d2b72f809e321fc8abf740e57ec39a384c09d4 - pristine_git_object: e2b6a45e5e5e68b6f562dc39519ab12ffca50322 - src/mistralai/models/legacyjobmetadataout.py: - id: 172ade2efb26 - last_write_checksum: sha1:bf608218a88f7e59cd6c9d0958940b68a200ba0d - pristine_git_object: 499512197a9f9600ac9f7cee43f024dde67fd775 - src/mistralai/models/libraries_delete_v1op.py: - id: ef50051027ec - last_write_checksum: sha1:2a9632da75355679918714a68b96e3ddf88fa5d3 - pristine_git_object: 56f8f8a8706b7aac67cf9b156a2e8710a4fdef36 - src/mistralai/models/libraries_documents_delete_v1op.py: - id: e18557420efe - last_write_checksum: sha1:6904ea388795a0b5f523959c979cf9b3a2c3ef4e - pristine_git_object: c33710b0e29664594891055c36199ea4846516dc - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py: - id: c8df3283cb98 - last_write_checksum: sha1:fefde9e22a010f900bd9012a2d438f909d54815f - pristine_git_object: e2459c1c68c81eb67983ac76de23dd8609420291 - src/mistralai/models/libraries_documents_get_signed_url_v1op.py: - id: 279ac5d9f945 - last_write_checksum: sha1:8ee5b6386f98d2af619f070e83e1f3772c07e199 - pristine_git_object: bc913ba56bd98d9937ddd5516837b5a8ead10454 - src/mistralai/models/libraries_documents_get_status_v1op.py: - id: ded8f142264f - last_write_checksum: sha1:ac1f85ecb74ef43e6e831794badbbd57e99f7028 - pristine_git_object: 08992d7c9ee5ba85ef97971fa6e06af465e39fa9 - src/mistralai/models/libraries_documents_get_text_content_v1op.py: - id: 497b693d0ba6 - last_write_checksum: sha1:11eeb61bab8b745ba22f2087393ba0cf91b76180 - pristine_git_object: 21a131ad6448597a996f7d96723f6bc8cf12ddf0 - src/mistralai/models/libraries_documents_get_v1op.py: - id: 7b1e6957ca40 - last_write_checksum: sha1:a3e3d1dee18ee2900417db836b1f8b49a14e0501 - pristine_git_object: ff2bdedbcaa8cf4c8e31091ed529274bf5d3ec04 - src/mistralai/models/libraries_documents_list_v1op.py: - id: d5cc573ae1a0 - last_write_checksum: sha1:43b6af0f23ff88d6e13f48acf12baa01a03eb243 - pristine_git_object: e6ff29cf4edb7b269cd66c5299b7531b13973dd2 - src/mistralai/models/libraries_documents_reprocess_v1op.py: - id: 3e832394e71b - last_write_checksum: sha1:36ced698b57573338eb95f5d70983ba4b9dcb0e0 - pristine_git_object: 861993e7e0fd06576e878758a44029613d381a4c - src/mistralai/models/libraries_documents_update_v1op.py: - id: 902a2c649e04 - last_write_checksum: sha1:c8ba64250a66dbdd9ac409ffeccb6bb75ba619c2 - pristine_git_object: 5551d5eec7961a5cc0fa9018ba680304e1f99d57 - src/mistralai/models/libraries_documents_upload_v1op.py: - id: a4586d35c41c - last_write_checksum: sha1:83c40a6b1a790d292c72c90847926d458ea73d83 - pristine_git_object: 51f536cca6141b0243d3c3fff8da3224a0c51ea5 - 
src/mistralai/models/libraries_get_v1op.py: - id: ed8ae2dc35b4 - last_write_checksum: sha1:c9dc682319790ec77c3827b44e3e8937de0de17f - pristine_git_object: b87090f6bb56c7f7d019483c0e979f9f2fdc3378 - src/mistralai/models/libraries_share_create_v1op.py: - id: 6a5d94d8a3dc - last_write_checksum: sha1:312ec2ea1635e86da293a0f402498031591c9854 - pristine_git_object: a8b0e35db9a452a62dbc0893009a9708684d2a23 - src/mistralai/models/libraries_share_delete_v1op.py: - id: 474f847642a7 - last_write_checksum: sha1:557000669df73a160d83bcaaf456579890fa7f92 - pristine_git_object: e29d556a73a87a6f799948f05517a50545dfd79e - src/mistralai/models/libraries_share_list_v1op.py: - id: 5ccdc4491119 - last_write_checksum: sha1:c3ca37074f14aad02a9d01099fe7134204d5520e - pristine_git_object: b276d756e95e9e7dc53cd7ff5da857052c055046 - src/mistralai/models/libraries_update_v1op.py: - id: 6de043d02383 - last_write_checksum: sha1:0936d1273af7659d7283c1defc2094178bc58003 - pristine_git_object: c93895d97f165d4fa4cc33097f6b772b55337623 - src/mistralai/models/libraryin.py: - id: 0277ef6b7a58 - last_write_checksum: sha1:56e033aef199fd831da7efff829c266206134f99 - pristine_git_object: 872d494d66abde55130a6d2a6c30de950f51232c - src/mistralai/models/libraryinupdate.py: - id: 96904d836434 - last_write_checksum: sha1:50c13a51aee5fc6c562090dad803ca6b3a1a5bed - pristine_git_object: 6e8ab81acae479e5fb999c91bfc55f6e1cbee5cc - src/mistralai/models/libraryout.py: - id: e483109c6e21 - last_write_checksum: sha1:6394431205bd4c308de4ee600e839ac0c6624fc0 - pristine_git_object: d3bc36f94735fbabb23d6c19ff481e404227f548 - src/mistralai/models/listdocumentout.py: - id: 872891f10a41 - last_write_checksum: sha1:61f444f7318e20921ddda1efd1e63e9bbec1d93d - pristine_git_object: 9d39e0873f463cce5fca723a3c85f47cf0f6ddeb - src/mistralai/models/listfilesout.py: - id: 43a961a42ca8 - last_write_checksum: sha1:d3e0d056a8337adaffced63e2ed5b4b37a60927d - pristine_git_object: 2f82b37db7f3cb69d68ab097f9f75488939f66c8 - src/mistralai/models/listlibraryout.py: - id: dcd1a940efe5 - last_write_checksum: sha1:7dc2876bf50861c8e94079859725cadf2d7b14c4 - pristine_git_object: 1e647fe1db65421d73ba6e0f35cc580e99ea7212 - src/mistralai/models/listsharingout.py: - id: c04e23806a57 - last_write_checksum: sha1:efd9e780445bdcf4a4e7794cd1aedaa85067f904 - pristine_git_object: 38c0dbe0ab9aeb3c977e38f2bf95d84297456980 - src/mistralai/models/messageentries.py: - id: 2e456a2494da + src/mistralai/client/models/inputs.py: + id: 84a8007518c7 + last_write_checksum: sha1:3ecd986b0f5a0de3a4c88f06758cfa51068253e9 + pristine_git_object: fb0674760c1191f04e07f066e84ae9684a1431e3 + src/mistralai/client/models/instructrequest.py: + id: 6d3ad9f896c7 + last_write_checksum: sha1:5f8857f8fffe0b858cfc7bec268480003b562303 + pristine_git_object: 1b2f269359700582687fdf4492ea3cef64da48bb + src/mistralai/client/models/jobin.py: + id: f4d176123ccc + last_write_checksum: sha1:c1ec4b9ea0930612aea1b1c5c5cd419379ab0687 + pristine_git_object: dc7684fcbecd558fc6e3e3f17c4000ec217285c1 + src/mistralai/client/models/jobmetadataout.py: + id: 805f41e3292a + last_write_checksum: sha1:5f84c58dab92d76de8d74f2e02cdf7b2b4c9cc12 + pristine_git_object: f91e30c09232b5227972b3b02ba5efbde22ac387 + src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py: + id: b56cb6c17c95 + last_write_checksum: sha1:e5e2c422bb211bb4af3e8c1a4b48e491d0fdf5a4 + pristine_git_object: 21a04f7313b3594a204395ca080b76e2a4958c63 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py: + id: 36b5a6b3ceee + last_write_checksum: 
sha1:5ada7f2b7a666f985c856a6d9cab1969928c9488 + pristine_git_object: 32e34281cd188f4d6d23d100fe0d45002030c56b + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py: + id: d8f0af99c94d + last_write_checksum: sha1:3026ea0231866e792dd3cf83eb2b2bac93eda61b + pristine_git_object: 3557e773860e94d85f7a528d000f03adfcc60c2f + src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: + id: 34f89d2af0ec + last_write_checksum: sha1:2a7225666b02d42be0d3455a249a962948feadf9 + pristine_git_object: 4536b738442ec9710ddf67f2faf7d30b094d8cd5 + src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: + id: d175c6e32ecb + last_write_checksum: sha1:07bfc80146492e3608a5c1683e4530de296c0938 + pristine_git_object: b36d3c3ef5abb30abc886876bb66384ea41bab9e + src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: + id: 81651291187a + last_write_checksum: sha1:eb265e749cc076b2d39c103df48ceeeda6da7f5a + pristine_git_object: ece0d15a0654ec759904276ad5d95c5619ff016f + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: + id: d910fd8fe2d6 + last_write_checksum: sha1:7ee82991b49a615517b3323abbfc0e5928419890 + pristine_git_object: aa5a26098e084885e8c2f63944e7549969899d3c + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: + id: cf43028824bf + last_write_checksum: sha1:3fd6b5c7c9ae24d662abd5d3c7ea9699e295e5ff + pristine_git_object: 7e399b31354e4f09c43efbe9ffe3d938f6af0d8c + src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: + id: e7ff4a4a4edb + last_write_checksum: sha1:176fef64d07c58da36ca6672ce5440508787dc84 + pristine_git_object: ed5938b039be719169e62e033b7735bde7e72503 + src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: + id: 7cc1c80335a9 + last_write_checksum: sha1:4270cb52e5aef807ec2d8a9ab1ca1065b0cf8a10 + pristine_git_object: e1be0ac00af889a38647b5f7e4f9d26ed09ee7c4 + src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: + id: 6d9dc624aafd + last_write_checksum: sha1:1a8054c02cd8fd3c48954812e153e97efa58aaef + pristine_git_object: a2b70b37e349c7f5fc6c687fbad015eb218de952 + src/mistralai/client/models/jobsout.py: + id: 22e91e9631a9 + last_write_checksum: sha1:f2a5aa117953410f0743c2dd024e4a462a0be105 + pristine_git_object: 9087704f0660e39f662efbd36f39713202598c43 + src/mistralai/client/models/jsonschema.py: + id: e1fc1d8a434a + last_write_checksum: sha1:6289875b78fab12efa9e3a4aa4bebdb08a95d332 + pristine_git_object: db2fa55ba9001bd3715451c15e9661a87ff7501a + src/mistralai/client/models/legacyjobmetadataout.py: + id: 4f44aa38c864 + last_write_checksum: sha1:b6aba9032bb250c5a23f2ff2a8521b7bddcd1a06 + pristine_git_object: 155ecea78cb94fc1a3ffaccc4af104a8a81c5d44 + src/mistralai/client/models/libraries_delete_v1op.py: + id: b2e8bbd19baa + last_write_checksum: sha1:566db1febc40c73476af31a27201a208b64bc32a + pristine_git_object: fa447de067518abb355b958954ff9a3ee9b2cf6d + src/mistralai/client/models/libraries_documents_delete_v1op.py: + id: 81eb34382a3d + last_write_checksum: sha1:c7bd801e5f75d1716101721cd3e711be978cb7c5 + pristine_git_object: bc5ec6e5443b32d47e570c4f43c43827928a3e39 + src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py: + id: a7417ebd6040 + last_write_checksum: sha1:a298e22d9a68de87288419717b03273c1a26de6e + pristine_git_object: 24ed897d305cfccdc2b9717e214da901479cc70e + 
src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py: + id: d4b7b47913ba + last_write_checksum: sha1:0855bb39a09514fb5709bd3674eb5fcc618299f1 + pristine_git_object: 350c8e73992583b7890889c5ff252096a8df7fbd + src/mistralai/client/models/libraries_documents_get_status_v1op.py: + id: f314f73e909c + last_write_checksum: sha1:ca4679fbdc833b42e35b4c015ddf8434321d86eb + pristine_git_object: 92b077d3b5850985cac73ee880de7eab31a5b8fd + src/mistralai/client/models/libraries_documents_get_text_content_v1op.py: + id: 1ca4e0c41321 + last_write_checksum: sha1:8dbd91ab145d4c01e91502c9349477e1f98551d7 + pristine_git_object: 68f9725a1a390028e3118611bb0df1b4ab103943 + src/mistralai/client/models/libraries_documents_get_v1op.py: + id: 26ff35f0c69d + last_write_checksum: sha1:208b7ca22416295d27f51513e3fe58947e1549c7 + pristine_git_object: a67e687eaffebbee81654bbbb78ad00bcc28999c + src/mistralai/client/models/libraries_documents_list_v1op.py: + id: 756f26de3cbe + last_write_checksum: sha1:a742a58c137ecf1cfd7446d5f2f60211ff087751 + pristine_git_object: 5dec33858719e713c0fa07538aa0dfcab8d69dad + src/mistralai/client/models/libraries_documents_reprocess_v1op.py: + id: dbbeb02fc336 + last_write_checksum: sha1:516691f61c18e18b96738360a85acd34ba415ca0 + pristine_git_object: 8aee75522f7677e9f6fc49e2f8c5a75124db3dc7 + src/mistralai/client/models/libraries_documents_update_v1op.py: + id: 734ba6c19f5f + last_write_checksum: sha1:929f437a1c366b6cbecfc86b43436767712327f8 + pristine_git_object: f677b4ddc96b51ecd777240844800b2634ca4358 + src/mistralai/client/models/libraries_documents_upload_v1op.py: + id: "744466971862" + last_write_checksum: sha1:63b6f82a3ed8b0655d3b5dea1811699553d62cb0 + pristine_git_object: e2d59d9f1556ca77c0666b2bba3213ef5386f82a + src/mistralai/client/models/libraries_get_v1op.py: + id: d493f39e7ebb + last_write_checksum: sha1:d61166f6c399516d905c7376fabe56c102265747 + pristine_git_object: 83ae377d245e5c93a4a9118dd049a9096e9f3074 + src/mistralai/client/models/libraries_share_create_v1op.py: + id: feaacfd46dd3 + last_write_checksum: sha1:66ddb6685924e1702cfc40dbcb9a0d2e525cb57d + pristine_git_object: d0313bd01acd6e5403402d0d80a604a6c2812e19 + src/mistralai/client/models/libraries_share_delete_v1op.py: + id: 7f3a679ca384 + last_write_checksum: sha1:3ac568a5e09a6c74bc6779cd9c0bc3df36b24785 + pristine_git_object: 620527d50c15f5b14307e7735b429fe194469ed5 + src/mistralai/client/models/libraries_share_list_v1op.py: + id: 8f0af379bf1c + last_write_checksum: sha1:3d764be7232233229dc79079101270ace179e65f + pristine_git_object: fd5d9d33ce4b757b369d191621a727f71b5d2e35 + src/mistralai/client/models/libraries_update_v1op.py: + id: 92c8d4132252 + last_write_checksum: sha1:482c5b78278a6e729ed980191c6c1b94dbd890e6 + pristine_git_object: c434ab7a8be94042e6add582520dba11dc9d8d01 + src/mistralai/client/models/libraryin.py: + id: 6147d5df71d9 + last_write_checksum: sha1:5b7fe7a4bde80032bd36fad27f5854ad4bb1832f + pristine_git_object: a7b36158a165ab5586cba26cc1f96ab6fe938501 + src/mistralai/client/models/libraryinupdate.py: + id: 300a6bb02e6e + last_write_checksum: sha1:95060dfcdafbfe2deb96f450b128cd5d6f4e0e5a + pristine_git_object: f0241ba17f95b2c30a102bf1d09ac094c6e757e5 + src/mistralai/client/models/libraryout.py: + id: 4e608c7aafc4 + last_write_checksum: sha1:4089ffe9adc8e561b9ec093330c276de653bff7f + pristine_git_object: d1953f16490d40876d05cdd615a3ae8cbcbfd9f6 + src/mistralai/client/models/listdocumentout.py: + id: b2c96075ce00 + last_write_checksum: sha1:13c5461b89970ae00cdce8b80045ed586fd113b7 + 
pristine_git_object: 24969a0f6dc3d2e0badd650a2694d1ffa0062988 + src/mistralai/client/models/listfilesout.py: + id: ae5fa21b141c + last_write_checksum: sha1:2ef7f78253cde73c3baae6aebeda6568bcb96c0d + pristine_git_object: 1db17c406778ac201dfcc1fd348a3e1176f05977 + src/mistralai/client/models/listlibraryout.py: + id: cb78c529e763 + last_write_checksum: sha1:044d3d17138c3af1feba6b980f92f8db7bd64578 + pristine_git_object: 24aaa1a9874d0e2054f6a49efe0f70101cec2fb2 + src/mistralai/client/models/listsharingout.py: + id: ee708a7ccdad + last_write_checksum: sha1:0644f080e93a533f40579b8c59e5039dea4ee02d + pristine_git_object: f139813f54e97810502d658ad924911de646ab09 + src/mistralai/client/models/messageentries.py: + id: e13f9009902b last_write_checksum: sha1:7c2503f4a1be5e5dde5640dd7413fed06aee09b4 pristine_git_object: 9b1706dee5fbb5cae18674708a1571b187bf0576 - src/mistralai/models/messageinputcontentchunks.py: - id: 344669e96a85 + src/mistralai/client/models/messageinputcontentchunks.py: + id: 01025c12866a last_write_checksum: sha1:740e82b72d5472f0cc967e745c07393e2df8ae38 pristine_git_object: e90d8aa0317e553bfc0cceb4a356cf9994ecfb60 - src/mistralai/models/messageinputentry.py: - id: 2e0500be6230 - last_write_checksum: sha1:118ffb7715993d7c103be5d26894ce33d8437f8a - pristine_git_object: edf05631be8d89002fd3a3bfb3034a143b12ed21 - src/mistralai/models/messageoutputcontentchunks.py: - id: e8bb72ef0c0f + src/mistralai/client/models/messageinputentry.py: + id: c0a4b5179095 + last_write_checksum: sha1:def6a5ce05756f76f7da6504bfc25eea166b21ab + pristine_git_object: 12a31097a88e90645c67a30451a379427cd4fcd3 + src/mistralai/client/models/messageoutputcontentchunks.py: + id: 2ed248515035 last_write_checksum: sha1:f239151ae206f6e82ee3096d357ff33cf9a08138 pristine_git_object: 136a7608e7e2a612d48271a7c257e2bb383584f3 - src/mistralai/models/messageoutputentry.py: - id: 0113bf848952 - last_write_checksum: sha1:3a1569ef7b3efadb87418d3ed38a6df0710cca1b - pristine_git_object: 0e2df81e3e75841d31bafd200697e9fd236b6fbe - src/mistralai/models/messageoutputevent.py: - id: d194af351767 - last_write_checksum: sha1:b9c4bf8db3d22d6b01d79044258729b5daafc050 - pristine_git_object: 751767a31666e839ec35d722707d97db605be25f - src/mistralai/models/metricout.py: - id: "369168426763" - last_write_checksum: sha1:d245a65254d0a142a154ee0f453cd7b64677e666 - pristine_git_object: 930b5c2181d4c5c5d89474b66fc1a4eef7ca7865 - src/mistralai/models/mistralerror.py: - id: 89288c78040b + src/mistralai/client/models/messageoutputentry.py: + id: a07577d2268d + last_write_checksum: sha1:d0ca07d6bf6445a16761889bf04a5851abe21ea3 + pristine_git_object: d52e4e3e722ef221f565a0bd40f505385974a0e1 + src/mistralai/client/models/messageoutputevent.py: + id: a2bbf63615c6 + last_write_checksum: sha1:19dda725e29108b2110903e7883ce442e4e90bd4 + pristine_git_object: 3db7f5a0908a72f75f6f7303af4ad426a4909d84 + src/mistralai/client/models/metricout.py: + id: 92d33621dda7 + last_write_checksum: sha1:056f6e7e76182df649804034d722c5ad2e43294f + pristine_git_object: f8027a69235861ae8f04ccc185d61fa13cc8cc14 + src/mistralai/client/models/mistralerror.py: + id: 68ffd8394c2e last_write_checksum: sha1:07fce1e971a25d95ffa8c8f3624d62cdf96e353a pristine_git_object: 28cfd22dc3d567aa4ae55cc19ad89341fa9c96a1 - src/mistralai/models/mistralpromptmode.py: - id: b2580604c1fe - last_write_checksum: sha1:71cf04622681998b091f51e4157463109761333f - pristine_git_object: dfb6f2d2a76fd2749d91397752a38b333bae8b02 - src/mistralai/models/modelcapabilities.py: - id: a9589b97b15c - last_write_checksum: 
sha1:56ea040fb631f0825e9ce2c7b32de2c90f6923a1 - pristine_git_object: 6edf8e5bf238b91a245db3489f09ae24506103f3 - src/mistralai/models/modelconversation.py: - id: 7d8b7b8d62a8 - last_write_checksum: sha1:b76cc407f807c19c1ff5602f7dd1d0421db2486d - pristine_git_object: 8eca4f973cd20e8bcb70a519f8dc3749878f04a2 - src/mistralai/models/modellist.py: - id: 22085995d513 - last_write_checksum: sha1:f753c11b430f8dd4daffb60bef467c6fa20f5e52 - pristine_git_object: 394cb3fa66a8881b013f78f1c8ee5440c9933427 - src/mistralai/models/moderationobject.py: - id: de835c5cd36e - last_write_checksum: sha1:24befa2934888192a12d9954749b8e591eb22582 - pristine_git_object: 5eff2d2a100c96eb7491ca99716fc9523fb74643 - src/mistralai/models/moderationresponse.py: - id: 831711e73705 - last_write_checksum: sha1:a96af206b8cd7c161c77cde0d3720880f20cf7f8 - pristine_git_object: ed13cd6bc226e8e505ef248760374c795705440f - src/mistralai/models/no_response_error.py: - id: 3102fe819ad6 + src/mistralai/client/models/mistralpromptmode.py: + id: 95abc4ec799a + last_write_checksum: sha1:ed0b87853d373d830b6572cbdf99d64f167b1d48 + pristine_git_object: 7008fc055bd1031096b7a486a17bf9a5b7841a4c + src/mistralai/client/models/modelcapabilities.py: + id: 64d8a422ea29 + last_write_checksum: sha1:3857f4b989eeed681dffe387d48d66f880537db6 + pristine_git_object: a6db80e73189addcb1e1951a093297e0523f5fa4 + src/mistralai/client/models/modelconversation.py: + id: fea0a651f888 + last_write_checksum: sha1:35fec41b1dac4a83bdf229de5dd0436916b144c8 + pristine_git_object: 574f053d4186288980754ead28bb6ce19b414064 + src/mistralai/client/models/modellist.py: + id: 00693c7eec60 + last_write_checksum: sha1:4b9cdd48439f0ebc1aa6637cc93f445fc3e8a424 + pristine_git_object: 6a5209fa6dac59539be338e9ac6ffbefd18057ee + src/mistralai/client/models/moderationobject.py: + id: 132faad0549a + last_write_checksum: sha1:d108ea519d2f491ddbc2e99ab5b8cc02e6987cf8 + pristine_git_object: a6b44b96f00f47c168cd1b2339b7aa44e6ca139e + src/mistralai/client/models/moderationresponse.py: + id: 06bab279cb31 + last_write_checksum: sha1:d31313c2164ecbc5a5714435a52b6f0dda87b8fe + pristine_git_object: 288c8d82d87a9944ae6d7a417bb92e558c6dcc0f + src/mistralai/client/models/no_response_error.py: + id: 2849e0a482e2 last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 - src/mistralai/models/ocrimageobject.py: - id: 44523566cf03 - last_write_checksum: sha1:75bb3b2eec938bd59052ea85244130770d787cbf - pristine_git_object: cec0acf4104ba7153270a1130ac2ac58a171b147 - src/mistralai/models/ocrpagedimensions.py: - id: 0d8589f80c1a - last_write_checksum: sha1:d62f216c61756592e6cde4a5d72b68eedeaddcc5 - pristine_git_object: d1aeb54d869545aec3ecaad1240f1be2059280f1 - src/mistralai/models/ocrpageobject.py: - id: 2dfef21e786f - last_write_checksum: sha1:667013bdfafb5ed0867fa9cd350455f66fee3e90 - pristine_git_object: 737defbaea323e0f3ccd95c2a721f57acc9f43a0 - src/mistralai/models/ocrrequest.py: - id: 7dbc4bb7cafb - last_write_checksum: sha1:2f49cf3d70f2aa11cf2e7ac9f7cc262901387eb5 - pristine_git_object: 0e061ac95f2d92d0d8bb14a2d27b64d01bb4e962 - src/mistralai/models/ocrresponse.py: - id: a187e70d8c2e - last_write_checksum: sha1:0c09aee803a5e1a3ba7c7f5d0ce46e96ee3339ca - pristine_git_object: 7b65bee7e6c0fffc7019f7843dcf88c0b5fade4e - src/mistralai/models/ocrtableobject.py: - id: 1be0c3cc027f - last_write_checksum: sha1:804d15ad21276f47f5ea9beccab9e471840ac32e - pristine_git_object: 5f30ab5e15dabf6a96498f46cf6178dca7fdb906 - 
src/mistralai/models/ocrusageinfo.py: - id: 91ab3d4cd57a - last_write_checksum: sha1:018eaf85ebffbb3392ed3c6688a41882a0893015 - pristine_git_object: 36c9f826cc64f67b254bdd07b00ad77857a91e1c - src/mistralai/models/outputcontentchunks.py: - id: 25ae74f4c9b8 + src/mistralai/client/models/ocrimageobject.py: + id: 685faeb41a80 + last_write_checksum: sha1:93f3d24c4b7513fffef60d5590f3e5a4a0b6e1e4 + pristine_git_object: e97fa8df46c6e39775b3c938c7e1862a507090d2 + src/mistralai/client/models/ocrpagedimensions.py: + id: 02f763afbc9f + last_write_checksum: sha1:28e91a96916711bce831e7fa33a69f0e10298eed + pristine_git_object: f4fc11e0952f59b70c49e00d9f1890d9dd93a0df + src/mistralai/client/models/ocrpageobject.py: + id: 07a099f89487 + last_write_checksum: sha1:367035d07f306aa5ce73fc77635d061a75612a68 + pristine_git_object: f8b43601e7a3dd4fae554c763d3ed1ee6f2927a3 + src/mistralai/client/models/ocrrequest.py: + id: 36f204c64074 + last_write_checksum: sha1:d4b7a8bf70efe5828d04d773f4b82284a18656f1 + pristine_git_object: 03a6028c5cc298b3ed66ae5f31c310d573a954e5 + src/mistralai/client/models/ocrresponse.py: + id: 2fdfc881ca56 + last_write_checksum: sha1:fb848d5f5c1456028a1e04b9e4f5be3234fa073f + pristine_git_object: 2813a1ca4c94d690f248a318a9e35d655d80600c + src/mistralai/client/models/ocrtableobject.py: + id: d74dd0d2ddac + last_write_checksum: sha1:6821e39003e2ca46dc31384c2635e59763fddb98 + pristine_git_object: 0c9091de8975d8bd8e673aadbb69a619b96d77e8 + src/mistralai/client/models/ocrusageinfo.py: + id: 272b7e1785d5 + last_write_checksum: sha1:b466bdd22ad5fa5f08c8aa51e3a6ff5e2fcbf749 + pristine_git_object: 62f07fd4fafa4c16a8cf80a9f52754904943272a + src/mistralai/client/models/outputcontentchunks.py: + id: 9ad9741f4975 last_write_checksum: sha1:47d74d212ebcb68ff75a547b3221f2aee3e07bfe pristine_git_object: ad0c087e0dcbe302dd9c73d1ea03e109e9a66ef7 - src/mistralai/models/paginationinfo.py: - id: 7e6919dfd6b1 - last_write_checksum: sha1:5ae05b383e9381862b8a980d83e73765b726294d - pristine_git_object: 00d4f1ec906e8485fdcb3e4b16a0b01acfa2be4b - src/mistralai/models/prediction.py: - id: ad77ec075e6d - last_write_checksum: sha1:d359ab3a37229212459228329219a1ec26a0381d - pristine_git_object: 582d87896b477de867cadf5e85d58ee71c445df3 - src/mistralai/models/processingstatusout.py: - id: 54d1c125ef83 - last_write_checksum: sha1:475749250ada2566c5a5d769eda1d350ddd8be8f - pristine_git_object: e67bfa865dcf94656a67f8612a5420f8b43cc0ec - src/mistralai/models/realtimetranscriptionerror.py: - id: f869fd6faf74 - last_write_checksum: sha1:17f78beea9e1821eed90c8a2412aadf953e17774 - pristine_git_object: 0785f7001aeaba7904120a62d569a35b7ee88a80 - src/mistralai/models/realtimetranscriptionerrordetail.py: - id: d106a319e66b - last_write_checksum: sha1:16e0fea1a3be85dfea6f2c44a53a15a3dc322b4c - pristine_git_object: cb5d73f861ce053a17b66695d2b56bafe1eeb03e - src/mistralai/models/realtimetranscriptionsession.py: - id: 48c7076e6ede - last_write_checksum: sha1:ae722fc946adf7282fd79c3a2c80fb53acc70ef2 - pristine_git_object: bcd0cfe37600b80e59cd50bd0edac3444be34fdb - src/mistralai/models/realtimetranscriptionsessioncreated.py: - id: 24825bcd61b2 - last_write_checksum: sha1:81f840757637e678c4512765ba8fda060f5af8cb - pristine_git_object: 9a2c2860d1538f03e795c62754244131820e2d44 - src/mistralai/models/realtimetranscriptionsessionupdated.py: - id: 5575fb5d1980 - last_write_checksum: sha1:a2d8d5947ba6b46dcd9a0a1e377067dbb92bfdf1 - pristine_git_object: ad1b513364f5d8d2f92fbc012509bf7567fa4573 - src/mistralai/models/referencechunk.py: - id: 
6cdbb4e60749 - last_write_checksum: sha1:48a4dddda06aadd16f6ea34c58848430bd561432 - pristine_git_object: 1864ac794d4e637556003cbb2bf91c10832d90f9 - src/mistralai/models/requestsource.py: - id: 1836766b9e81 + src/mistralai/client/models/paginationinfo.py: + id: 48851e82d67e + last_write_checksum: sha1:b17cc84c592706882d5819b1a706c9a206de9198 + pristine_git_object: 0252f4482f50b34a35f52911b4b57b6899751b42 + src/mistralai/client/models/prediction.py: + id: 1cc842a069a5 + last_write_checksum: sha1:d9bd04d22d58e7e1be0195aaed218a4f407db9c0 + pristine_git_object: f2c5d9c60c50c6e397d7df9ce71ccff957b0e058 + src/mistralai/client/models/processingstatusout.py: + id: 3df842c4140f + last_write_checksum: sha1:83fbbccf635fabf60452dfa8dcac696033c3d436 + pristine_git_object: 031f386fb4381b8e2ead1bd22f7f53e59e37f6bb + src/mistralai/client/models/realtimetranscriptionerror.py: + id: 8c2267378f48 + last_write_checksum: sha1:671be287639964cc6ac7efbed41998f225845e2e + pristine_git_object: e6a889de576f9e36db551a44d4ed3cf0c032e599 + src/mistralai/client/models/realtimetranscriptionerrordetail.py: + id: 5bd25cdf9c7a + last_write_checksum: sha1:49ff15eb41e8964ba3b150e2fca70f6529dee58f + pristine_git_object: 27bb8d872792723b06238b3f0eebed815948fd63 + src/mistralai/client/models/realtimetranscriptionsession.py: + id: 02517fa5411a + last_write_checksum: sha1:a6db31662165d3df47a5da11efd1923121d1593e + pristine_git_object: 3a3306513c111125c71871024caa650176360c1b + src/mistralai/client/models/realtimetranscriptionsessioncreated.py: + id: 4e3731f63a3c + last_write_checksum: sha1:5d2e0541b58a3c647ded25d6a0cf8590f64cf0db + pristine_git_object: cc6d5028f221e1794c723dedac5c73564ddb61f7 + src/mistralai/client/models/realtimetranscriptionsessionupdated.py: + id: 686dc4f2450f + last_write_checksum: sha1:2311bf0107f0f957c48ee1841cc95369269a6105 + pristine_git_object: 3da23595291cd49e42d30646288f4f39da6f8c00 + src/mistralai/client/models/referencechunk.py: + id: 921acd3a224a + last_write_checksum: sha1:abfc5818dbe9e40be5d71436f2ffd1a9b53bd4ab + pristine_git_object: 4c703b8165329a55343c20b5080670168327afc4 + src/mistralai/client/models/requestsource.py: + id: 3f2774d9e609 last_write_checksum: sha1:31aae791bf737ad123fe189227d113838204ed42 pristine_git_object: 7b0a35c44050b6fca868479e261805a77f33e230 - src/mistralai/models/responsedoneevent.py: - id: 6300eaecde3c - last_write_checksum: sha1:693d832a480e943ff9c3e4f6822bea8358750ee1 - pristine_git_object: 5a3a3dfb8630713a618cc23f97660840e4fbbeca - src/mistralai/models/responseerrorevent.py: - id: 88185105876c - last_write_checksum: sha1:5adfc1acdba4035f1a646a7678dd09e16d05e747 - pristine_git_object: 6cb1b26885ad9ded4f75f226b0ce713206cb0a49 - src/mistralai/models/responseformat.py: - id: 6d5e093fdba8 - last_write_checksum: sha1:4c4a801671419f403263caafbd90dbae6e2203da - pristine_git_object: 92284017b5b895673e510a739bc5c5ed104de4af - src/mistralai/models/responseformats.py: - id: e5fccecf2b70 + src/mistralai/client/models/responsedoneevent.py: + id: cf8a686bf82c + last_write_checksum: sha1:1fa63522f52a48a8e328dc5b3fe2c6f5206b04cc + pristine_git_object: 5405625692cb22c60a7b5f5a6f1b58cee5676576 + src/mistralai/client/models/responseerrorevent.py: + id: b286d74e8724 + last_write_checksum: sha1:f570a02791afb3fe60e99cbb4993c2d1f8dc476d + pristine_git_object: c9ef95a04c91c32e7a7973309e2174b7e776f099 + src/mistralai/client/models/responseformat.py: + id: 6ab8bc8d22c0 + last_write_checksum: sha1:ad0489488713a977dbf4eac739ce2734c8280350 + pristine_git_object: 
5899b0175cefd4159eb680a3715a72fa78577ba4 + src/mistralai/client/models/responseformats.py: + id: c4462a05fb08 last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai/models/responsestartedevent.py: - id: 37fbb3e37d75 - last_write_checksum: sha1:1d1eb4b486b2b92d167367d6525a8ea709d00c15 - pristine_git_object: d14d45ef8aa0d4e6dfa5893c52ae292f1f9a5780 - src/mistralai/models/responsevalidationerror.py: - id: 4b46e43f015b - last_write_checksum: sha1:c90231f7d7d3e93d6a36972ec4bead76fcb9ac47 - pristine_git_object: ed30165511c209289a030c5e9d9af1d2ad93d77c - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py: - id: 81db6b688ded - last_write_checksum: sha1:8a7f0585855118e73fcd8f7213757172ac94c6fc - pristine_git_object: bfe62474610239f6e1ac0b5a4dc4b6ee9d321bd6 - src/mistralai/models/retrievefileout.py: - id: 5cf73a0007f0 - last_write_checksum: sha1:04abbd25f8757b7d9763a2c0aaca561a78960fbd - pristine_git_object: 94540083c22b330dc48428e0d80f1cf2292b93ab - src/mistralai/models/sampletype.py: - id: d1558bd8d355 - last_write_checksum: sha1:fbfdf1616eb6b64d785c11f11a33fca794de19eb - pristine_git_object: efb43e9be278aa00cda9828c5c8cb3edabc68d0f - src/mistralai/models/sdkerror.py: - id: d3c914c3c63a - last_write_checksum: sha1:6d6dafaf73210b86ef2fea441e2e864752242737 - pristine_git_object: 65c45cf1c2cb4047e3cce21538890e5f62136f0f - src/mistralai/models/security.py: - id: 88dd24d389d4 - last_write_checksum: sha1:3d460b276d68380a64d8d91947981ce27d92e552 - pristine_git_object: cf05ba8fbce8d7b9199396c41ccd4c218d71998b - src/mistralai/models/shareenum.py: - id: 371f676fce97 - last_write_checksum: sha1:9061b04c7b26435911ea18b095d76400e1ab1698 - pristine_git_object: 634ba4b7e800e134f209fa851391b1a49cd6fc97 - src/mistralai/models/sharingdelete.py: - id: 334b4a8820ae - last_write_checksum: sha1:e21d1a3cd972b02beecd3a2d3ed3ebf70ea9c414 - pristine_git_object: ebcdbab517d524cf4f2056fb253acb713e042d58 - src/mistralai/models/sharingin.py: - id: b762157651b7 - last_write_checksum: sha1:479261e2c4ad827b878b66afa5dfaec49df4573a - pristine_git_object: f7bb89ca1b670cfa9d66b3135e762e04ba6454a4 - src/mistralai/models/sharingout.py: - id: "198686162036" - last_write_checksum: sha1:ae269a353d6733ac81ab6a4f3ea3368eef2a99ec - pristine_git_object: 12455818a5c1f44538696015bee079bce9567cdc - src/mistralai/models/source.py: - id: 6f2e7cd2285e - last_write_checksum: sha1:b0fe76d6566e4573317ad4c862ddc11423a8bde7 - pristine_git_object: cc3abce298c4b817081610238e489d4023ca6f3f - src/mistralai/models/ssetypes.py: - id: 7817469fd731 + src/mistralai/client/models/responsestartedevent.py: + id: 24f54ee8b0f2 + last_write_checksum: sha1:5f7a4fad7c13f89b6e3672e422d5ef902aa5bf03 + pristine_git_object: dc6a10f91e2bb0d13a582ed03e7db2089b75bcf7 + src/mistralai/client/models/responsevalidationerror.py: + id: c244a88981e0 + last_write_checksum: sha1:2687c9ca7df0763384030719e5c1447d83f511b3 + pristine_git_object: bab5d0b70e0bb2ea567a16a1a7c5db839651836f + src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: + id: 6fefa90ca351 + last_write_checksum: sha1:c34e2f55663cafe353e628fbd978a6be7ca6a467 + pristine_git_object: 7fdcd37d5879aaca158f459df830a5a4dc55bfa0 + src/mistralai/client/models/retrievefileout.py: + id: 8bb5859aa0d0 + last_write_checksum: sha1:9d182b5b20c8edef9b98a42036b13afd98031fd5 + pristine_git_object: ffd0617a1c6465a5f8080eb65e382e7a9169eef4 + src/mistralai/client/models/sampletype.py: + id: a9309422fed7 + 
last_write_checksum: sha1:1eb21a68c138e9a0d39b4dd14bcffc9e3ff0784f + pristine_git_object: e0727b028c790a62da67784965f825436dead4f8 + src/mistralai/client/models/sdkerror.py: + id: 12f991dad510 + last_write_checksum: sha1:9ee3f2dfd9977ce77957d60116db7d04740a4eed + pristine_git_object: ceb03c4868f9c9111007d6c16411f5da1954f211 + src/mistralai/client/models/security.py: + id: c2ca0e2a36b7 + last_write_checksum: sha1:415802794c6a3f22c58e863be0f633727f681600 + pristine_git_object: 1b67229bee0b64f3a9e8fc3600a7b0c9c13c0a2d + src/mistralai/client/models/shareenum.py: + id: a0e2a7a16bf8 + last_write_checksum: sha1:0beaa4472ed607142b485c9e208441f9050746b9 + pristine_git_object: ca1b96245e81327aa830f07c0588dccdc1ee518e + src/mistralai/client/models/sharingdelete.py: + id: f5ecce372e06 + last_write_checksum: sha1:c943bfc24aa0f2035a1b5261d29efb5f3518a555 + pristine_git_object: d659342f1330d73354d557a45bc1a16015a38d8b + src/mistralai/client/models/sharingin.py: + id: e953dda09c02 + last_write_checksum: sha1:996c17a8db2c61daed285ee5cafd44481fbd1483 + pristine_git_object: 630f4c70552167237735797f6b64d3f1df5ea214 + src/mistralai/client/models/sharingout.py: + id: 0b8804effb5c + last_write_checksum: sha1:b3356792affd50e062bb1f1a84d835bbcfeb50ab + pristine_git_object: 195701d111514fe9aebfedce05dbb4bafab67fed + src/mistralai/client/models/source.py: + id: fcee60a4ea0d + last_write_checksum: sha1:6f3ea355c62280e1fc6008da69ed0b987f53fd72 + pristine_git_object: 181b327ea73a9bcf9fb90f95633da71cee96e599 + src/mistralai/client/models/ssetypes.py: + id: 1733e4765106 last_write_checksum: sha1:1901bf6feee92ac100113e0a98dc0abe6e769375 pristine_git_object: 796f0327cbb1372c1b2a817a7db39f8f185a59be - src/mistralai/models/systemmessage.py: - id: 0f0c7d12c400 - last_write_checksum: sha1:6886cc2f9603aabf75289ccc895e23ad45e65dc7 - pristine_git_object: 2b34607b39a1a99d6569985818a89d9e973f3cdd - src/mistralai/models/systemmessagecontentchunks.py: - id: 5a051e10f9df - last_write_checksum: sha1:bef0630a287d9000595a26049290b978c0816ddc - pristine_git_object: a1f04d1e5802521d4913b9ec1978c3b9d77ac38f - src/mistralai/models/textchunk.py: - id: 7dee31ce6ec3 - last_write_checksum: sha1:5ae5f498eaf03aa99354509c7558de42f7933c0c - pristine_git_object: 6052686ee52d3713ddce08f22c042bab2569f4da - src/mistralai/models/thinkchunk.py: - id: 8d0ee5d8ba9c - last_write_checksum: sha1:34f0cc91e66cb0ad46331b4e0385534d13b9ee1c - pristine_git_object: 627ae4883698696774b7a285a73326a4509c6828 - src/mistralai/models/timestampgranularity.py: - id: e0cb6c4efa2a + src/mistralai/client/models/systemmessage.py: + id: 500ef6e85ba1 + last_write_checksum: sha1:0e8e34fa66e4bb8bf1128b3007ef72bf33690e1e + pristine_git_object: 9e01bc57bd17a5ecf6be5fee3383bbb9e03a8ab5 + src/mistralai/client/models/systemmessagecontentchunks.py: + id: 297e8905d5af + last_write_checksum: sha1:4581a28c592708bf51dbc75b28fe9f7bddde3c70 + pristine_git_object: 7a79737964b79e39b760ef833cce24e411f5aa90 + src/mistralai/client/models/textchunk.py: + id: 9c96fb86a9ab + last_write_checksum: sha1:8abd7cb3d8149458d95268eea8f18d5096e77fb0 + pristine_git_object: 4207ce7e46141aed94cf0f8726bb2433709101ca + src/mistralai/client/models/thinkchunk.py: + id: 294bfce193a4 + last_write_checksum: sha1:a6cd3efbf01dc0a72818675893594179addcfd12 + pristine_git_object: b1560806b88b733bf3b574c3e0d45e93df892548 + src/mistralai/client/models/timestampgranularity.py: + id: 68ddf8d702ea last_write_checksum: sha1:68ea11a4e27f23b2fcc976d0a8eeb95f6f28ba85 pristine_git_object: 5bda890f500228ba0c3dc234edf09906b88cb522 - 
src/mistralai/models/tool.py: - id: c0a9b60b6cf1 - last_write_checksum: sha1:805030012b6cf4d6159c1515b44e1c999ea2349a - pristine_git_object: b14a6adf2a804153e071c28b7e225594278b7443 - src/mistralai/models/toolcall.py: - id: 08f53b1090d7 - last_write_checksum: sha1:3b876a5d90066ebc4a337e7ba90b0607d9028c9e - pristine_git_object: 1f36792484f22af884a2b651442dbf1086e36f53 - src/mistralai/models/toolchoice.py: - id: de7498a868da - last_write_checksum: sha1:ec3178ff2a398b569ea6161e37006a349b75e94f - pristine_git_object: f8e1b48621527ca86f07efd4500089d339ddeb6a - src/mistralai/models/toolchoiceenum.py: - id: 580f382c7857 + src/mistralai/client/models/tool.py: + id: 48b4f6f50fe9 + last_write_checksum: sha1:5f80f78858fb50e0688123f8dd1478eeb0e7c5af + pristine_git_object: 4b29f575a3604d83fd6b492c26327f36e6e5a681 + src/mistralai/client/models/toolcall.py: + id: fb34a1a3f3c2 + last_write_checksum: sha1:f4c5de640f5b942f180062388be187a910067a1b + pristine_git_object: 558b49bfaec7c306c093b97a4bbf722fe9f4b6b1 + src/mistralai/client/models/toolchoice.py: + id: 14f7e4cc35b6 + last_write_checksum: sha1:f833d01b307437a83705b9b669b0d95eab4c01e0 + pristine_git_object: 2c7f6cbf6ebfbbdcce7d82b885b5e07b6b52d066 + src/mistralai/client/models/toolchoiceenum.py: + id: c7798801f860 last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 - src/mistralai/models/toolexecutiondeltaevent.py: - id: 674ab6adad2e - last_write_checksum: sha1:002e73c21df7e785268d77bad00b7967a514ede7 - pristine_git_object: 4fca46a80810a9976a0de70fef9e895be82fa921 - src/mistralai/models/toolexecutiondoneevent.py: - id: 86a2329a500d - last_write_checksum: sha1:00174f618358d49546ff8725a6dc3a9aebe5926c - pristine_git_object: 621d55718957c766c796f6f98814ed917ccbaadc - src/mistralai/models/toolexecutionentry.py: - id: 41e2484af138 - last_write_checksum: sha1:c05c9f72cf939d4da334489be57e952b2fbd68f9 - pristine_git_object: 9f70a63b720b120283adc1292188f1f0dd8086a1 - src/mistralai/models/toolexecutionstartedevent.py: - id: 0987fdd1cd45 - last_write_checksum: sha1:beab5d913fb60fc98ec81dffb4636143e23286ec - pristine_git_object: 80dd5e97084cdedcdb2752491a61d8b2aadb091a - src/mistralai/models/toolfilechunk.py: - id: 275d194f5a7b - last_write_checksum: sha1:0ecb2b0ef96d57084c19f43553fdfafdf209ec16 - pristine_git_object: 87bc822c091f1b0c1896f0da16764e225e3f324c - src/mistralai/models/toolmessage.py: - id: dff99c41aecf - last_write_checksum: sha1:19fbda605416fcc20f842b6d3067f64de2691246 - pristine_git_object: ef917c4369a7459e70f04da2c20ed62b9316d9bc - src/mistralai/models/toolreferencechunk.py: - id: 5e3482e21a7e - last_write_checksum: sha1:21038657452d30fd80b5204451b7b7bfbbce6cf6 - pristine_git_object: 2a751cb08f1442ca5f91ab0b688db822c6f72dd7 - src/mistralai/models/tooltypes.py: - id: c4ef111ec45b - last_write_checksum: sha1:f9cd152556d95e9e197ac0c10f65303789e28bcb - pristine_git_object: f54893c259518313218d9ee307669c291a8c0cf8 - src/mistralai/models/trainingfile.py: - id: 150e9031690e - last_write_checksum: sha1:f20266317087b92eb74ed8cd48e7477666faf9a8 - pristine_git_object: 99bd49dd760960558be40adf138f9b4b95ee62d9 - src/mistralai/models/transcriptionresponse.py: - id: b50f2e392e31 - last_write_checksum: sha1:79d57bf44dbad0f364ac57ad967642271b7a7526 - pristine_git_object: 54a98a5ba7b83a6b7f6a39046b400a61e9889898 - src/mistralai/models/transcriptionsegmentchunk.py: - id: ccd6d5675b49 - last_write_checksum: sha1:01b1c1c52a1e324c8f874586cdd0349fed35443c - pristine_git_object: 
40ad20b3abc2f0b2c0d2d695ba89237f66cc0b2b - src/mistralai/models/transcriptionstreamdone.py: - id: 42177659bf0f - last_write_checksum: sha1:5fda2b766b2af41749006835e45c95f708eddb28 - pristine_git_object: e1b1ab3d6f257786a5180f6876f47d47414e7e72 - src/mistralai/models/transcriptionstreamevents.py: - id: 9593874b7574 - last_write_checksum: sha1:ace344cfbec0af2ad43b0b61ae444e34f9e9da99 - pristine_git_object: 8207c03fef9d76ca7405b85d93c2f462eae22329 - src/mistralai/models/transcriptionstreameventtypes.py: - id: e2e35365ad39 + src/mistralai/client/models/toolexecutiondeltaevent.py: + id: df8f17cf3e07 + last_write_checksum: sha1:32257ebf812efe05763df71e498018d53884a32d + pristine_git_object: 0268e6a0d9b3c25afe1022e61a630e926a50f135 + src/mistralai/client/models/toolexecutiondoneevent.py: + id: 514fdee7d99f + last_write_checksum: sha1:e99be4db8d87bb3aa9383c062846d35923721292 + pristine_git_object: 854baee98a119caf237ca0f39e4ddd7a36577771 + src/mistralai/client/models/toolexecutionentry.py: + id: 76db69eebe41 + last_write_checksum: sha1:1577af968f800b28a3da2006c44016a901532591 + pristine_git_object: 839709fb8ea63cc358de9f5e71180bf9e94cf5a5 + src/mistralai/client/models/toolexecutionstartedevent.py: + id: 40fadb8e49a1 + last_write_checksum: sha1:49922a41c52e7f25eab26c8a34ec481c319c62b4 + pristine_git_object: 66438cfc33b171868f597ff3f80a82a40d1396f4 + src/mistralai/client/models/toolfilechunk.py: + id: 26c8aadf416a + last_write_checksum: sha1:753db4dd27eea752066a04774094cba73aeb8ca0 + pristine_git_object: 62b5ffeda19a7fa614ccc5e390450f2452dd119d + src/mistralai/client/models/toolmessage.py: + id: 15f1af161031 + last_write_checksum: sha1:47b4b3426ecde263ce4f2918ff98135952447b40 + pristine_git_object: eae2d2aef69dc4134f42714d69625e7b6c43e8c9 + src/mistralai/client/models/toolreferencechunk.py: + id: 822e9f3e70de + last_write_checksum: sha1:bf6b77aff4de13f4f374513e85785a1c6b17b87b + pristine_git_object: 882b1563a44cbc77256b6f44b1f41d602956d0b4 + src/mistralai/client/models/tooltypes.py: + id: 86c3b54272fd + last_write_checksum: sha1:94cd31b4a170bde0983bc48e8c1148693c3d67e0 + pristine_git_object: abb26c258280a889d784e662b45ed486fc648817 + src/mistralai/client/models/trainingfile.py: + id: 2edf9bce227d + last_write_checksum: sha1:12257eadce20511a4f3e3f3424e3bca112510f5f + pristine_git_object: 1d9763e0fd8e44f9b6e05254c5abb5a81fdf0b17 + src/mistralai/client/models/transcriptionresponse.py: + id: 60896dbc6345 + last_write_checksum: sha1:1f3066c34b7e76acc46ddb1e69869f3c62bfb841 + pristine_git_object: 24c0b92e424e91d40972c0826553a7d344a8f932 + src/mistralai/client/models/transcriptionsegmentchunk.py: + id: d1e6f3bdc74b + last_write_checksum: sha1:5f16b05debe943432b69d390844216a703adf71a + pristine_git_object: c89d84fcf3842da23e1f710309446b4c592ceeb3 + src/mistralai/client/models/transcriptionstreamdone.py: + id: 066a9158ed09 + last_write_checksum: sha1:1f9a29e826dcc91ed0c7f08b69aaa81987d810b7 + pristine_git_object: add17f562385c3befc2932b16448901154372ca6 + src/mistralai/client/models/transcriptionstreamevents.py: + id: b50b3d74f16f + last_write_checksum: sha1:38d2ff40e9d4f5d09fa24eef0925d306cf434bf0 + pristine_git_object: caaf943a4662ecccab96183f63c226eaefee2882 + src/mistralai/client/models/transcriptionstreameventtypes.py: + id: 6f71f6fbf4c5 last_write_checksum: sha1:38b7334aebf400e1abb2b20b0f2890880f0fc2f7 pristine_git_object: 4a910f0abca2912746cac60fd5a16bd5464f2457 - src/mistralai/models/transcriptionstreamlanguage.py: - id: 635759ec85f3 - last_write_checksum: 
sha1:93e389c2c8b41e378cfe7f88f05d8312236024e6 - pristine_git_object: 15b7514415e536bb04fd1a69ccea20615b5b1fcf - src/mistralai/models/transcriptionstreamsegmentdelta.py: - id: 83d02b065099 - last_write_checksum: sha1:3f70d4d58d8fedb784d056425662e7dc2f9ed244 - pristine_git_object: 550c83e7073bc99fdac6a0d59c5c30daa9d35f43 - src/mistralai/models/transcriptionstreamtextdelta.py: - id: ce0861d8affd - last_write_checksum: sha1:84a3b6c6d84a896e59e2874de59d812d3db657a5 - pristine_git_object: daee151f4ceaaee6c224b6dd078b4dfb680495b3 - src/mistralai/models/unarchiveftmodelout.py: - id: d758d3dee216 - last_write_checksum: sha1:b60e3292d2c4e6bf1456649184eaef4c75732cfc - pristine_git_object: 55c0ea8aa841ecef08f64020f099353efbdbcf7d - src/mistralai/models/updateftmodelin.py: - id: dbf79e18efd0 - last_write_checksum: sha1:aab40882f622a32054d73e33ca2be279bb880080 - pristine_git_object: 1bd0eaf2eb9b3427da6f4581b36d4316c0d129bf - src/mistralai/models/uploadfileout.py: - id: 1fa81af96888 - last_write_checksum: sha1:ebd3800e23e32b7f95665393db9a8e955c2912ea - pristine_git_object: f235fdcdf23d39d408d20a43597652f8daf677b0 - src/mistralai/models/usageinfo.py: - id: 62e303fb96aa - last_write_checksum: sha1:7f81b8c11fb5076e03a9fa40865382c9b45b700e - pristine_git_object: cedad5c12a96418567294e91812bfd96dce875bf - src/mistralai/models/usermessage.py: - id: dd10edab3b81 - last_write_checksum: sha1:a22b667ed90d8e34923d36422ef7ea6ae83d2dd7 - pristine_git_object: 61590bed06e1a397a1166a04a0b2405b833d19ff - src/mistralai/models/validationerror.py: - id: 0c6798c22859 - last_write_checksum: sha1:be4e31bc68c0eed17cd16679064760ac1f035d7b - pristine_git_object: e971e016d64237f24d86c171222f66575152fd1f - src/mistralai/models/wandbintegration.py: - id: a2f0944d8dbd - last_write_checksum: sha1:43a3c6f8d77cde042cfa129954f48c419d3fe1b9 - pristine_git_object: 690538963550d6adaf291fab8344f317c3c9080e - src/mistralai/models/wandbintegrationout.py: - id: bfae63e4ff4c - last_write_checksum: sha1:843e286ce58f072f27e8cb67b4c4f35001ffe0f0 - pristine_git_object: f5a9ba802b489f595bfc2578b9f3456b5230bdb3 - src/mistralai/models/websearchpremiumtool.py: - id: "710695472090" - last_write_checksum: sha1:85a562f976a03e9a3a659018caa78d2e26caeef9 - pristine_git_object: 3bbe753acb99f74f8eb7aa63a387f35714b0a259 - src/mistralai/models/websearchtool.py: - id: d8f773002c11 - last_write_checksum: sha1:1e48212c4cc43bf937a3d21837878a1722666a30 - pristine_git_object: eeafecb4847e66075b64dc34512aaca7a045900b - src/mistralai/models_.py: - id: dfcd71fd4c33 - last_write_checksum: sha1:076e72b91c364f1a4905092b02e2ad7ebf7765c6 - pristine_git_object: d44930a0db06117ba538424273935016a133e0ae - src/mistralai/ocr.py: - id: e23da68c9ae8 - last_write_checksum: sha1:ce13d4ac0fc3cc52b2a76480c570d89cfe71c002 - pristine_git_object: ceb7dd85f958452aeb55868c65746ccf6ec200a5 - src/mistralai/py.typed: - id: 3923b7c50c56 + src/mistralai/client/models/transcriptionstreamlanguage.py: + id: e94333e4bc27 + last_write_checksum: sha1:9427411056a6239956ed3963af53c452e6fc4705 + pristine_git_object: b47024adfca2d8da6f1f01ce573bcc339cbbc63a + src/mistralai/client/models/transcriptionstreamsegmentdelta.py: + id: c0a882ce57e5 + last_write_checksum: sha1:3cc8664a90c67c412fc3c58e6841571c476697ea + pristine_git_object: 7cfffb63f31d10a247e066c8f422e4f6af2cf489 + src/mistralai/client/models/transcriptionstreamtextdelta.py: + id: 6086dc081147 + last_write_checksum: sha1:d68e4b6cefa3a1492b461fbe17cff5c5216b58f5 + pristine_git_object: ce279cf67ffc4e225ce37490f4ffd0c0d64fe993 + 
src/mistralai/client/models/unarchiveftmodelout.py: + id: 9dbc3bfb71ed + last_write_checksum: sha1:b2a1f9af7a5a7f5cbcda3256c46d02926e0cf2da + pristine_git_object: 511c390b4192cf85ec86150c7dad84543c68e031 + src/mistralai/client/models/updateftmodelin.py: + id: 39e2d678e651 + last_write_checksum: sha1:dd8dda798b804c4927505ac1fcbd13787f32a25d + pristine_git_object: 0471a15458f3cff4939360d3891af0fdee9ec251 + src/mistralai/client/models/uploadfileout.py: + id: 42466f2bebfb + last_write_checksum: sha1:db43df223f848a25a1526624cd3722ef3014e700 + pristine_git_object: 55e56504db280fdb4772bb061128742866555e82 + src/mistralai/client/models/usageinfo.py: + id: 54adb9a3af16 + last_write_checksum: sha1:a5f57f73d176aa8f4a9ad91daefe8e6257398abc + pristine_git_object: f1186d97357320f4bfc9d3f2a626f58d2b1a38d0 + src/mistralai/client/models/usermessage.py: + id: cb583483acf4 + last_write_checksum: sha1:1c15371710f18d7ed8f612cc450f4873f83f1eb9 + pristine_git_object: 8d92cea803368e996d68dc2f3a2dadd1d06a4675 + src/mistralai/client/models/validationerror.py: + id: 15df3c7368ab + last_write_checksum: sha1:de86af94be29bd8bfd5fa2708eeb3dda3032423d + pristine_git_object: 352409be88a1175073e5438d6da86fc9a54896fc + src/mistralai/client/models/wandbintegration.py: + id: 4823c1e80942 + last_write_checksum: sha1:a76661e93fd3b6d8a3d210ef610a40ff1da203f7 + pristine_git_object: 89489fb4527c6515a609bcb533ef59ab516c7a38 + src/mistralai/client/models/wandbintegrationout.py: + id: 6b103d74195c + last_write_checksum: sha1:e648c37d559f8cec36b3c8e06979d8ac053a2ad6 + pristine_git_object: a7f9afeb6683a173115371a686af5f95e2d29056 + src/mistralai/client/models/websearchpremiumtool.py: + id: bfe88af887e3 + last_write_checksum: sha1:af6e2fae78c2f22b98d58ab55b365d1688dba8cb + pristine_git_object: 8d2d4b5dfea50a34ac744181790bf5db84809b1c + src/mistralai/client/models/websearchtool.py: + id: 26b0903423e5 + last_write_checksum: sha1:49295d52d59e914620dedf9d22fb2290896039cf + pristine_git_object: ba4cc09f84faebb438a631db6ac328fea2ced609 + src/mistralai/client/models_.py: + id: 1d277958a843 + last_write_checksum: sha1:8f76c2395cb534e94366033007df24bf56c43ac7 + pristine_git_object: 5ef9da096e58023aaa582f31717b4ee7a4b720b0 + src/mistralai/client/ocr.py: + id: 2f804a12fc62 + last_write_checksum: sha1:877f0c2db0319ea6b5ccf3d92f35bf633df10eda + pristine_git_object: ce7e2126dda2bc2b12cefb96e955edd3c7d4b6ab + src/mistralai/client/py.typed: + id: d95cd1565e33 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 - src/mistralai/sdk.py: - id: b2a76476b492 - last_write_checksum: sha1:f0ce70fdd61fc69a6afb59a46b42719c14e429d8 - pristine_git_object: c83b53e0445788e27d0e451030807f1c6b86560b - src/mistralai/sdkconfiguration.py: - id: e6e7f1fb8b52 - last_write_checksum: sha1:63a0ae64777a9d39debeb6ef36ac6d71dadc6d80 - pristine_git_object: 7e77925ddc9aa01c4e5f8ba2ca52aa7f32e89859 - src/mistralai/transcriptions.py: - id: ba6b040274f2 - last_write_checksum: sha1:0cd336f14cccb581ff955feaf8bc6f7df185f27b - pristine_git_object: 90f2e58a3677e922cb5c8aac4b30d5e697ef2f05 - src/mistralai/types/__init__.py: - id: b89b8375c971 + src/mistralai/client/sdk.py: + id: 48edbcb38d7e + last_write_checksum: sha1:831d2d1fee16c8d970c946f80ec56ba965e4f0ca + pristine_git_object: 9957940005a1150762e9fc284993cefeb2e8831a + src/mistralai/client/sdkconfiguration.py: + id: b7dd68a0235e + last_write_checksum: sha1:a24763668db44bf36ca35d1efa4873e2495dd716 + pristine_git_object: df50d16fa502e8b4c2a4567f3541fd48bfc1e324 
+ src/mistralai/client/transcriptions.py: + id: 75b45780c978 + last_write_checksum: sha1:5c305412b646fa70232fd141e93378b3b4d4b3c4 + pristine_git_object: 455010243710d56d033861b1440cc1e30924d40c + src/mistralai/client/types/__init__.py: + id: 000b943f821c last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c - src/mistralai/types/basemodel.py: - id: 18149749a011 + src/mistralai/client/types/basemodel.py: + id: 7ec465a1d3ff last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee - src/mistralai/utils/__init__.py: - id: 6f6ad3db2456 + src/mistralai/client/utils/__init__.py: + id: b69505f4b269 last_write_checksum: sha1:c7c1ee47be7ac3774b042c8aee439143493ed3ce pristine_git_object: f9c2edce8ecf2d2a4ab0ad36129ac70afd3d1f2f - src/mistralai/utils/annotations.py: - id: 76966ef1943a + src/mistralai/client/utils/annotations.py: + id: 1ffdedfc66a2 last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 - src/mistralai/utils/datetimes.py: - id: a0aa72e39d40 + src/mistralai/client/utils/datetimes.py: + id: c40066d868c9 last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 - src/mistralai/utils/enums.py: - id: 400af6d98484 + src/mistralai/client/utils/enums.py: + id: a0735873b5ac last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 - src/mistralai/utils/eventstreaming.py: - id: 7b58f8ceb28e + src/mistralai/client/utils/eventstreaming.py: + id: 3263d7502030 last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc - src/mistralai/utils/forms.py: - id: a584268d234f + src/mistralai/client/utils/forms.py: + id: 58842e905fce last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 - src/mistralai/utils/headers.py: - id: 3b4141506f5a + src/mistralai/client/utils/headers.py: + id: 9066de2ead8b last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a - src/mistralai/utils/logger.py: - id: e35e15a1b67e - last_write_checksum: sha1:23efbe8d8d3b9412877f3cd35b37477d0e460a2f - pristine_git_object: cc08930715f6f03a559a2f30c3a9482071a3e1e2 - src/mistralai/utils/metadata.py: - id: 617f23c58d0d + src/mistralai/client/utils/logger.py: + id: 745023607a1f + last_write_checksum: sha1:3212454c3047548e8f9099366dc0e7c37e5918ac + pristine_git_object: 2ef27ee5bb8cd37d9aa66b076c449fd9c80e2627 + src/mistralai/client/utils/metadata.py: + id: d49d535ae52c last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d - src/mistralai/utils/queryparams.py: - id: 6d86b06d25db + src/mistralai/client/utils/queryparams.py: + id: bb77d4664844 last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 - src/mistralai/utils/requestbodies.py: - id: 09529564c402 + src/mistralai/client/utils/requestbodies.py: + id: 946cfcd26ee4 last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c - src/mistralai/utils/retries.py: - id: 3c8dad479e7d + 
src/mistralai/client/utils/retries.py:
+ id: 5f1a5b90423c
  last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607
  pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13
- src/mistralai/utils/security.py:
- id: e8a6622acc38
+ src/mistralai/client/utils/security.py:
+ id: 1acb7c006265
  last_write_checksum: sha1:d86d2fd73cbb4a77f72395c10fff1c700efcf42e
  pristine_git_object: 3b8526bfdf5c9c871ed184a2ec785f7bc1ebe57e
- src/mistralai/utils/serializers.py:
- id: e3688f9815db
+ src/mistralai/client/utils/serializers.py:
+ id: 53c57c7f29a8
  last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7
  pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57
- src/mistralai/utils/unmarshal_json_response.py:
- id: 3bc4add4e1b6
- last_write_checksum: sha1:0b7b57b8a97ff6bfbb4dea22d59b8aade9a487f2
- pristine_git_object: 64d0b3a6c59921ac0a5fb05d52ba47d0b696ae0e
- src/mistralai/utils/url.py:
- id: 8aa618817e83
+ src/mistralai/client/utils/unmarshal_json_response.py:
+ id: b13585fc5626
+ last_write_checksum: sha1:4df16054b0c28b043d248dd8f56992574156bcd0
+ pristine_git_object: 6d43d6e44056d64e272f60a466c47391a60c792d
+ src/mistralai/client/utils/url.py:
+ id: 3c6496c17510
  last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41
  pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418
- src/mistralai/utils/values.py:
- id: 3b1394457cf4
+ src/mistralai/client/utils/values.py:
+ id: bb6ade7a7f82
  last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553
  pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3
  examples:
diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock
index bb904c64..38b7899c 100644
--- a/.speakeasy/workflow.lock
+++ b/.speakeasy/workflow.lock
@@ -39,7 +39,7 @@ targets:
         sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13
         sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56
         codeSamplesNamespace: mistral-openapi-code-samples
-        codeSamplesRevisionDigest: sha256:8fa56ecd9dd6e5f831fb96c4cfd00c65f617a03ff67f876d75ecdf28cb5bbf3c
+        codeSamplesRevisionDigest: sha256:deaa27e908bb7bee4f2ad753a92beb5749805f3f160eb56c5988b336d31a531c
 workflow:
     workflowVersion: 1.0.0
     speakeasyVersion: 1.685.0
diff --git a/README.md b/README.md
index 131ce557..e71b1a19 100644
--- a/README.md
+++ b/README.md
@@ -103,7 +103,7 @@ It's also possible to write a standalone Python script without needing to set up
 # ]
 # ///
-from mistralai import Mistral
+from mistralai.client import Mistral
 sdk = Mistral(
     # SDK arguments
@@ -136,7 +136,7 @@ This example shows how to create chat completions.
 ```python
 # Synchronous Example
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
@@ -164,7 +164,7 @@ The same SDK client can also be used to make asynchronous requests by importing
 ```python
 # Asynchronous Example
 import asyncio
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
 async def main():
@@ -194,7 +194,7 @@ This example shows how to upload a file.
 ```python
 # Synchronous Example
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
@@ -218,7 +218,7 @@ The same SDK client can also be used to make asynchronous requests by importing
 ```python
 # Asynchronous Example
 import asyncio
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
 async def main():
@@ -244,7 +244,7 @@ This example shows how to create agents completions.
 ```python
 # Synchronous Example
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
@@ -272,7 +272,7 @@ The same SDK client can also be used to make asynchronous requests by importing
 ```python
 # Asynchronous Example
 import asyncio
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
 async def main():
@@ -302,7 +302,7 @@ This example shows how to create embedding request.
 ```python
 # Synchronous Example
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
@@ -326,7 +326,7 @@ The same SDK client can also be used to make asynchronous requests by importing
 ```python
 # Asynchronous Example
 import asyncio
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
 async def main():
@@ -586,7 +586,7 @@ The stream is also a [Context Manager][context-manager] and can be used with
 the underlying connection when the context is exited.
 ```python
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
@@ -630,7 +630,7 @@ Certain SDK methods accept file objects as part of a request body or multi-part
 >
 ```python
-from mistralai import Mistral
+from mistralai.client import Mistral
 import os
@@ -656,8 +656,8 @@ Some of the endpoints in this SDK support retries. If you use the SDK without an
 To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call:
 ```python
-from mistralai import Mistral
-from mistralai.utils import BackoffStrategy, RetryConfig
+from mistralai.client import Mistral
+from mistralai.client.utils import BackoffStrategy, RetryConfig
 import os
@@ -675,8 +675,8 @@ If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK:
 ```python
-from mistralai import Mistral
-from mistralai.utils import BackoffStrategy, RetryConfig
+from mistralai.client import Mistral
+from mistralai.client.utils import BackoffStrategy, RetryConfig
 import os
@@ -696,7 +696,7 @@ with Mistral(
 ## Error Handling
-[`MistralError`](./src/mistralai/models/mistralerror.py) is the base class for all HTTP error responses. It has the following properties:
+[`MistralError`](./src/mistralai/client/models/mistralerror.py) is the base class for all HTTP error responses. It has the following properties:
 | Property           | Type             | Description                                                                               |
 | ------------------ | ---------------- | --------------------------------------------------------------------------------------- |
@@ -709,8 +709,8 @@ with Mistral(
 ### Example
 ```python
-import mistralai
-from mistralai import Mistral, models
+import mistralai.client
+from mistralai.client import Mistral, models
 import os
@@ -736,12 +736,12 @@ with Mistral(
         # Depending on the method different errors may be thrown
         if isinstance(e, models.HTTPValidationError):
-            print(e.data.detail) # Optional[List[mistralai.ValidationError]]
+            print(e.data.detail) # Optional[List[mistralai.client.ValidationError]]
 ```
 ### Error Classes
 **Primary error:**
-* [`MistralError`](./src/mistralai/models/mistralerror.py): The base class for HTTP error responses.
+* [`MistralError`](./src/mistralai/client/models/mistralerror.py): The base class for HTTP error responses.
Less common errors (6) @@ -753,9 +753,9 @@ with Mistral( * [`httpx.TimeoutException`](https://www.python-httpx.org/exceptions/#httpx.TimeoutException): HTTP request timed out. -**Inherit from [`MistralError`](./src/mistralai/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 52 of 74 methods.* -* [`ResponseValidationError`](./src/mistralai/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. +**Inherit from [`MistralError`](./src/mistralai/client/models/mistralerror.py)**: +* [`HTTPValidationError`](./src/mistralai/client/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 52 of 74 methods.* +* [`ResponseValidationError`](./src/mistralai/client/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute.
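Taken together, the hunks above change only the namespace: the error types and their semantics are identical, but they are now imported from `mistralai.client`. The following sketch (an editorial illustration, not part of the generated diff) shows the migrated error-handling pattern; the `MISTRAL_API_KEY` environment variable and the `mistral-small-latest` model id are assumptions borrowed from the SDK's other examples.

```python
from mistralai.client import Mistral, models  # previously: from mistralai import Mistral, models
import os


with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as mistral:
    try:
        res = mistral.chat.complete(
            model="mistral-small-latest",  # assumed model id
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(res)
    except models.HTTPValidationError as e:
        # 422 responses deserialize into e.data; detail carries the field-level errors
        print(e.data.detail)
    except models.MistralError as e:
        # Base class for all HTTP error responses, per the property table above
        print(e.message)
```

Because `HTTPValidationError` and `ResponseValidationError` both inherit from `MistralError`, the narrower handler must come before the base class for it to be reachable.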
@@ -776,7 +776,7 @@ You can override the default server globally by passing a server name to the `se #### Example ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -796,7 +796,7 @@ with Mistral( The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -822,7 +822,7 @@ This allows you to wrap the client with your own custom logic, such as adding cu For example, you could specify a header for every request that this sdk makes as follows: ```python -from mistralai import Mistral +from mistralai.client import Mistral import httpx http_client = httpx.Client(headers={"x-custom-header": "someValue"}) @@ -831,8 +831,8 @@ s = Mistral(client=http_client) or you could wrap the client with your own custom logic: ```python -from mistralai import Mistral -from mistralai.httpclient import AsyncHttpClient +from mistralai.client import Mistral +from mistralai.client.httpclient import AsyncHttpClient import httpx class CustomClient(AsyncHttpClient): @@ -907,7 +907,7 @@ This SDK supports the following security scheme globally: To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -931,7 +931,7 @@ The `Mistral` class implements the context manager protocol and registers a fina [context-manager]: https://docs.python.org/3/reference/datamodel.html#context-managers ```python -from mistralai import Mistral +from mistralai.client import Mistral import os def main(): @@ -958,11 +958,11 @@ You can setup your SDK to emit debug logs for SDK requests and responses. You can pass your own logger class directly into your SDK. ```python -from mistralai import Mistral +from mistralai.client import Mistral import logging logging.basicConfig(level=logging.DEBUG) -s = Mistral(debug_logger=logging.getLogger("mistralai")) +s = Mistral(debug_logger=logging.getLogger("mistralai.client")) ``` You can also enable a default debug logger by setting an environment variable `MISTRAL_DEBUG` to true. diff --git a/USAGE.md b/USAGE.md index a31d502f..18103864 100644 --- a/USAGE.md +++ b/USAGE.md @@ -5,7 +5,7 @@ This example shows how to create chat completions. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -33,7 +33,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -63,7 +63,7 @@ This example shows how to upload a file. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -87,7 +87,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -113,7 +113,7 @@ This example shows how to create agents completions. 
```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -141,7 +141,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -171,7 +171,7 @@ This example shows how to create embedding request. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -195,7 +195,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index 040bc24c..64a1e749 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -18,7 +18,7 @@ Given a library, list all of the Entity that have access and to what level. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -59,7 +59,7 @@ Given a library id, you can create or update the access level of an entity. You ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -104,7 +104,7 @@ Given a library id, you can delete the access level of an entity. An owner canno ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 173925ee..75efc492 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -17,7 +17,7 @@ Agents Completion ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -80,7 +80,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 5bb24baa..89c4fffb 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -17,7 +17,7 @@ Chat Completion ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -83,7 +83,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index e76efb79..634ee419 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -19,7 +19,7 @@ Moderations ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -65,7 +65,7 @@ Chat Moderations ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -112,7 +112,7 @@ Classifications ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -157,7 +157,7 @@ Chat Classifications ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index ca383176..acd43cdb 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -26,7 +26,7 @@ Create a new conversation, using a base model or an agent and append entries. 
Co ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -83,7 +83,7 @@ Retrieve a list of conversation entities sorted by creation time. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -126,7 +126,7 @@ Given a conversation_id retrieve a conversation entity with its attributes. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ Delete a conversation given a conversation_id. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -203,7 +203,7 @@ Run completion on the history of the conversation and the user entries. Return t ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -253,7 +253,7 @@ Given a conversation_id retrieve all the entries belonging to that conversation. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -294,7 +294,7 @@ Given a conversation_id retrieve all the messages belonging to that conversation ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -335,7 +335,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -388,7 +388,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -454,7 +454,7 @@ Run completion on the history of the conversation and the user entries. Return t ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -506,7 +506,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index d3f5a975..d90e7ee7 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -25,7 +25,7 @@ Given a library, lists the document that have been uploaded to that library. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -72,7 +72,7 @@ Given a library, upload a new document to that library. It is queued for process ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -117,7 +117,7 @@ Given a library and a document in this library, you can retrieve the metadata of ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -159,7 +159,7 @@ Given a library and a document in that library, update the name of that document ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -203,7 +203,7 @@ Given a library and a document in that library, delete that document. 
The docume ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -240,7 +240,7 @@ Given a library and a document in that library, you can retrieve the text conten ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -282,7 +282,7 @@ Given a library and a document in that library, retrieve the processing status o ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -324,7 +324,7 @@ Given a library and a document in that library, retrieve the signed URL of a spe ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -366,7 +366,7 @@ Given a library and a document in that library, retrieve the signed URL of text ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -408,7 +408,7 @@ Given a library and a document in that library, reprocess that document, it will ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 4390b7bd..0be7ea6d 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -16,7 +16,7 @@ Embeddings ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 57b53fc7..44c39f8a 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -25,7 +25,7 @@ Please contact us if you need to increase these storage limits. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -69,7 +69,7 @@ Returns a list of files that belong to the user's organization. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -116,7 +116,7 @@ Returns information about a specific file. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -156,7 +156,7 @@ Delete a file. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -196,7 +196,7 @@ Download a file ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -236,7 +236,7 @@ Get Signed Url ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index db6f2e1b..3c8c59c7 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -17,7 +17,7 @@ FIM completion. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -68,7 +68,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 666224a7..9c44be75 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -18,7 +18,7 @@ Get a list of fine-tuning jobs for your organization and user. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -67,7 +67,7 @@ Create a new fine-tuning job, it will be queued for processing. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -119,7 +119,7 @@ Get a fine-tuned job details by its UUID. 
```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -159,7 +159,7 @@ Request the cancellation of a fine tuning job. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -199,7 +199,7 @@ Request the start of a validated fine tuning job. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index e672c190..bbdacf05 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -20,7 +20,7 @@ List all libraries that you have created or have been shared with you. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -59,7 +59,7 @@ Create a new Library, you will be marked as the owner and only you will have the ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -102,7 +102,7 @@ Given a library id, details information about that Library. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -143,7 +143,7 @@ Given a library id, deletes it together with all documents that have been upload ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -184,7 +184,7 @@ Given a library id, you can update the name and description. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index bdd8d588..fe0f6e35 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -25,7 +25,7 @@ Create a new agent giving it instructions, tools, description. The agent is then ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -77,7 +77,7 @@ Retrieve a list of agent entities sorted by creation time. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -124,7 +124,7 @@ Given an agent, retrieve an agent entity with its attributes. The agent_version ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -166,7 +166,7 @@ Update an agent attributes and create a new version. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -220,7 +220,7 @@ Delete an agent entity. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -256,7 +256,7 @@ Switch the version of an agent. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -298,7 +298,7 @@ Retrieve all versions for a specific agent with full agent context. Supports pag ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -341,7 +341,7 @@ Get a specific agent version by version number. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -383,7 +383,7 @@ Create a new alias or update an existing alias to point to a specific version. A ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -426,7 +426,7 @@ Retrieve all version aliases for a specific agent. 
```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index f1aa3f61..8f2358de 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -17,7 +17,7 @@ Get a list of batch jobs for your organization and user. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -64,7 +64,7 @@ Create a new batch job, it will be queued for processing. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -113,7 +113,7 @@ Args: ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -154,7 +154,7 @@ Request the cancellation of a batch job. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index d51866b6..6fa28ca2 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -21,7 +21,7 @@ List all models available to the user. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -60,7 +60,7 @@ Retrieve information about a model. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -101,7 +101,7 @@ Delete a fine-tuned model. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -142,7 +142,7 @@ Update a model name or description. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -184,7 +184,7 @@ Archive a fine-tuned model. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -224,7 +224,7 @@ Un-archive a fine-tuned model. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index efcb9931..9fd9d6fc 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -16,7 +16,7 @@ OCR ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index dabab00e..9691b81d 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -17,7 +17,7 @@ Create Transcription ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -65,7 +65,7 @@ Create Streaming Transcription (SSE) ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/src/mistralai/client/__init__.py b/src/mistralai/client/__init__.py new file mode 100644 index 00000000..dd02e42e --- /dev/null +++ b/src/mistralai/client/__init__.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) +from .sdk import * +from .sdkconfiguration import * +from .models import * + + +VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/src/mistralai/client/_hooks/__init__.py b/src/mistralai/client/_hooks/__init__.py new file mode 100644 index 00000000..2ee66cdd --- /dev/null +++ b/src/mistralai/client/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/src/mistralai/client/_hooks/registration.py b/src/mistralai/client/_hooks/registration.py new file mode 100644 index 00000000..cab47787 --- /dev/null +++ b/src/mistralai/client/_hooks/registration.py @@ -0,0 +1,13 @@ +from .types import Hooks + + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. + + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance""" diff --git a/src/mistralai/client/_hooks/sdkhooks.py b/src/mistralai/client/_hooks/sdkhooks.py new file mode 100644 index 00000000..c9318db4 --- /dev/null +++ b/src/mistralai/client/_hooks/sdkhooks.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.client.httpclient import HttpClient + + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/src/mistralai/client/_hooks/types.py b/src/mistralai/client/_hooks/types.py new file mode 100644 index 00000000..e7e1bb7f --- /dev/null +++ b/src/mistralai/client/_hooks/types.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from abc import ABC, abstractmethod +import httpx +from mistralai.client.httpclient import HttpClient +from mistralai.client.sdkconfiguration import SDKConfiguration +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + config: SDKConfiguration + base_url: str + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__( + self, + config: SDKConfiguration, + base_url: str, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): + self.config = config + self.base_url = base_url + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py new file mode 100644 index 00000000..8c5d6e54 --- /dev/null +++ b/src/mistralai/client/_version.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai" +__version__: str = "2.0.0a1" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.794.1" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a1 2.794.1 1.0.0 mistralai" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/src/mistralai/client/accesses.py b/src/mistralai/client/accesses.py new file mode 100644 index 00000000..307c7156 --- /dev/null +++ b/src/mistralai/client/accesses.py @@ -0,0 +1,619 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + entitytype as models_entitytype, + shareenum as models_shareenum, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional + + +class Accesses(BaseSDK): + r"""(beta) Libraries API - manage access to a library.""" + + def list( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListSharingOut: + r"""List all of the access to this library. + + Given a library, list all of the Entity that have access and to what level. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareListV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListSharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListSharingOut: + r"""List all of the access to this library. + + Given a library, list all of the Entity that have access and to what level. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareListV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListSharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update_or_create( + self, + *, + library_id: str, + level: models_shareenum.ShareEnum, + share_with_uuid: str, + share_with_type: models_entitytype.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Create or update an access level. + + Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. + + :param library_id: + :param level: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareCreateV1Request( + library_id=library_id, + sharing_in=models.SharingIn( + org_id=org_id, + level=level, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_in, False, False, "json", models.SharingIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_or_create_async( + self, + *, + library_id: str, + level: models_shareenum.ShareEnum, + share_with_uuid: str, + share_with_type: models_entitytype.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Create or update an access level. + + Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. + + :param library_id: + :param level: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. 
+ :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareCreateV1Request( + library_id=library_id, + sharing_in=models.SharingIn( + org_id=org_id, + level=level, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_in, False, False, "json", models.SharingIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + share_with_uuid: str, + share_with_type: models_entitytype.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Delete an access level. + + Given a library id, you can delete the access level of an entity. An owner cannot delete its own access. You have to be the owner of the library to delete an access other than yours.
+ + :param library_id: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareDeleteV1Request( + library_id=library_id, + sharing_delete=models.SharingDelete( + org_id=org_id, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_delete, False, False, "json", models.SharingDelete + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + library_id: str, + share_with_uuid: str, + share_with_type: models_entitytype.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Delete an access level. + + Given a library id, you can delete the access level of an entity. An owner cannot delete its own access.
You have to be the owner of the library to delete an access other than yours. + + :param library_id: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareDeleteV1Request( + library_id=library_id, + sharing_delete=models.SharingDelete( + org_id=org_id, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_delete, False, False, "json", models.SharingDelete + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py new file mode 100644 index 00000000..c04abd21 --- /dev/null +++ b/src/mistralai/client/agents.py @@ -0,0 +1,725 @@ +"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + agentscompletionrequest as models_agentscompletionrequest, + agentscompletionstreamrequest as models_agentscompletionstreamrequest, + mistralpromptmode as models_mistralpromptmode, + prediction as models_prediction, + responseformat as models_responseformat, + tool as models_tool, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Agents(BaseSDK): + r"""Agents API.""" + + def complete( + self, + *, + messages: Union[ + List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[ + models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + ], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models_agentscompletionrequest.AgentsCompletionRequestStop, + models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_agentscompletionrequest.AgentsCompletionRequestToolChoice, + models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Agents Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_completion_v1_agents_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + messages: Union[ + List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[ + models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + ], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models_agentscompletionrequest.AgentsCompletionRequestStop, + models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_agentscompletionrequest.AgentsCompletionRequestToolChoice, + models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Agents Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided. + :param random_seed: The seed to use for random sampling. If set, repeated calls with the same parameters will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request; input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning`, the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests.
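+
+        A minimal usage sketch (the agent ID and message below are illustrative
+        placeholders, not values shipped with the SDK):
+
+        .. code-block:: python
+
+            import os
+            from mistralai import Mistral
+
+            async with Mistral(api_key=os.getenv("MISTRAL_API_KEY")) as mistral:
+                res = await mistral.agents.complete_async(
+                    agent_id="ag-your-agent-id",
+                    messages=[{"role": "user", "content": "Hello!"}],
+                )
+                print(res.choices[0].message.content)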
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_completion_v1_agents_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + messages: Union[ + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + ], + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + ], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, + 
models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
+ :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionStreamRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_agents", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise 
models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + messages: Union[ + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + ], + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + ], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionStreamRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config 
= None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_agents", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py new file mode 100644 index 00000000..28ccda1b --- /dev/null +++ b/src/mistralai/client/audio.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.transcriptions import Transcriptions +from typing import Optional + + +class Audio(BaseSDK): + transcriptions: Transcriptions + r"""API for audio transcription.""" + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.transcriptions = Transcriptions( + self.sdk_configuration, parent_ref=self.parent_ref + ) diff --git a/src/mistralai/client/basesdk.py b/src/mistralai/client/basesdk.py new file mode 100644 index 00000000..bddc9012 --- /dev/null +++ b/src/mistralai/client/basesdk.py @@ -0,0 +1,370 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai.client import models, utils +from mistralai.client._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) +from mistralai.client.utils import RetryConfig, SerializedRequestBody, get_body_content +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse + + +class BaseSDK: + sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. 
+ """ + + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: + self.sdk_configuration = sdk_config + self.parent_ref = parent_ref + + def _get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def _build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self._get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + allow_empty_value, + ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). 
+ parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + security = utils.get_security_from_env(security, models.Security) + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody() + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + def do(): + http_res = None + try: + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = hooks.after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: 
Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + async def do(): + http_res = None + try: + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = hooks.after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + + return http_res diff --git a/src/mistralai/client/batch.py b/src/mistralai/client/batch.py new file mode 100644 index 00000000..d53a45fb --- /dev/null +++ b/src/mistralai/client/batch.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.mistral_jobs import MistralJobs +from typing import Optional + + +class Batch(BaseSDK): + jobs: MistralJobs + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = MistralJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/beta.py b/src/mistralai/client/beta.py new file mode 100644 index 00000000..b30003ea --- /dev/null +++ b/src/mistralai/client/beta.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.conversations import Conversations +from mistralai.client.libraries import Libraries +from mistralai.client.mistral_agents import MistralAgents +from typing import Optional + + +class Beta(BaseSDK): + conversations: Conversations + r"""(beta) Conversations API""" + agents: MistralAgents + r"""(beta) Agents API""" + libraries: Libraries + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.conversations = Conversations( + self.sdk_configuration, parent_ref=self.parent_ref + ) + self.agents = MistralAgents(self.sdk_configuration, parent_ref=self.parent_ref) + self.libraries = Libraries(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py new file mode 100644 index 00000000..9c50bce8 --- /dev/null +++ b/src/mistralai/client/chat.py @@ -0,0 +1,753 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + chatcompletionrequest as models_chatcompletionrequest, + chatcompletionstreamrequest as models_chatcompletionstreamrequest, + mistralpromptmode as models_mistralpromptmode, + prediction as models_prediction, + responseformat as models_responseformat, + tool as models_tool, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + def complete( + self, + *, + model: str, + messages: Union[ + List[models_chatcompletionrequest.Messages], + List[models_chatcompletionrequest.MessagesTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models_chatcompletionrequest.Stop, + models_chatcompletionrequest.StopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_chatcompletionrequest.ChatCompletionRequestToolChoice, + models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + safe_prompt: 
Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + 
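+        # Response dispatch for the generated call above: a 200 JSON body is
+        # unmarshalled into ChatCompletionResponse, a 422 is parsed and re-raised
+        # as HTTPValidationError, and any other 4XX/5XX surfaces as SDKError.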
+ response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + messages: Union[ + List[models_chatcompletionrequest.Messages], + List[models_chatcompletionrequest.MessagesTypedDict], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models_chatcompletionrequest.Stop, + models_chatcompletionrequest.StopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_chatcompletionrequest.ChatCompletionRequestToolChoice, + models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
+ :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided. + :param random_seed: The seed to use for random sampling. If set, repeated calls with the same parameters will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request; input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning`, the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests.
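+
+        A minimal usage sketch (the model name and message below are illustrative
+        placeholders):
+
+        .. code-block:: python
+
+            import os
+            from mistralai import Mistral
+
+            async with Mistral(api_key=os.getenv("MISTRAL_API_KEY")) as mistral:
+                res = await mistral.chat.complete_async(
+                    model="mistral-small-latest",
+                    messages=[{"role": "user", "content": "Hello!"}],
+                )
+                print(res.choices[0].message.content)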
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + model: str, + messages: Union[ + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages + ], + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + ], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + 
Union[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use; we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress; for this method the default is `True`. + :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided. + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request; input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning`, the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests.
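
A minimal sketch of consuming the `EventStream` this method returns: each event wraps an incremental completion chunk, and iteration ends when the server sends the `data: [DONE]` sentinel (assuming the standard `Mistral` client; the model ID is illustrative):

```python
# Sketch only; assumes MISTRAL_API_KEY is set. Iterating the EventStream
# yields CompletionEvent objects until the [DONE] sentinel arrives.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

with client.chat.stream(
    model="mistral-small-latest",  # illustrative model ID
    messages=[{"role": "user", "content": "Write one sentence about rivers."}],
) as event_stream:
    for event in event_stream:
        # each event carries an incremental delta of the completion
        delta = event.data.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)
```
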
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + messages: Union[ + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages + ], + List[ + 
models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + ], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use; we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress; for this method the default is `True`. + :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided. + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request; input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning`, the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests.
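
The async twin is consumed with `async for` over the returned `EventStreamAsync`; a minimal sketch under the same assumptions as above:

```python
# Sketch only; assumes MISTRAL_API_KEY is set. stream_async returns an
# EventStreamAsync, which is awaited once and then iterated asynchronously.
import asyncio
import os
from mistralai import Mistral

async def main() -> None:
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    event_stream = await client.chat.stream_async(
        model="mistral-small-latest",  # illustrative model ID
        messages=[{"role": "user", "content": "Stream a short greeting."}],
    )
    async for event in event_stream:
        delta = event.data.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)

asyncio.run(main())
```
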
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py new file mode 100644 index 
00000000..537e2438 --- /dev/null +++ b/src/mistralai/client/classifiers.py @@ -0,0 +1,800 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + chatmoderationrequest as models_chatmoderationrequest, + classificationrequest as models_classificationrequest, + inputs as models_inputs, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Classifiers(BaseSDK): + r"""Classifiers API.""" + + def moderate( + self, + *, + model: str, + inputs: Union[ + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModerationResponse: + r"""Moderations + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + metadata=metadata, + inputs=inputs, + ) + + req = self._build_request( + method="POST", + path="/v1/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="moderations_v1_moderations_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModerationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def moderate_async( + self, + *, + model: str, + inputs: Union[ + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModerationResponse: + r"""Moderations + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + metadata=metadata, + inputs=inputs, + ) + + req = self._build_request_async( + method="POST", + path="/v1/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="moderations_v1_moderations_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModerationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", 
http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def moderate_chat( + self, + *, + inputs: Union[ + models_chatmoderationrequest.ChatModerationRequestInputs, + models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, + ], + model: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModerationResponse: + r"""Chat Moderations + + :param inputs: Chat to classify + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatModerationRequest( + inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/chat/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatModerationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_moderations_v1_chat_moderations_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModerationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def moderate_chat_async( + self, + *, + inputs: Union[ + 
models_chatmoderationrequest.ChatModerationRequestInputs, + models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, + ], + model: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModerationResponse: + r"""Chat Moderations + + :param inputs: Chat to classify + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatModerationRequest( + inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatModerationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_moderations_v1_chat_moderations_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModerationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def classify( + self, + *, + model: str, + inputs: Union[ + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: 
Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Classifications + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + metadata=metadata, + inputs=inputs, + ) + + req = self._build_request( + method="POST", + path="/v1/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="classifications_v1_classifications_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ClassificationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def classify_async( + self, + *, + model: str, + inputs: Union[ + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Classifications + + :param model: ID of the model to use. + :param inputs: Text to classify. 
+ :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + metadata=metadata, + inputs=inputs, + ) + + req = self._build_request_async( + method="POST", + path="/v1/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="classifications_v1_classifications_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ClassificationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def classify_chat( + self, + *, + model: str, + inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Chat Classifications + + :param model: + :param inputs: Chat to classify + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
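
For the chat classifiers, a usage sketch under stated assumptions: the `inputs` shape below (a single chat given as a list of role/content messages) is an assumption based on the `Inputs` union, and the model ID is hypothetical.

```python
# Sketch only; the inputs shape and the classifier model ID are assumptions.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.classifiers.classify_chat(
    model="my-chat-classifier",  # hypothetical classifier model ID
    inputs=[{"role": "user", "content": "I love this product!"}],  # one chat
)
print(res.results)  # per-input category scores
```
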
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatClassificationRequest( + model=model, + inputs=utils.get_pydantic_model(inputs, models.Inputs), + ) + + req = self._build_request( + method="POST", + path="/v1/chat/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_classifications_v1_chat_classifications_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ClassificationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def classify_chat_async( + self, + *, + model: str, + inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Chat Classifications + + :param model: + :param inputs: Chat to classify + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatClassificationRequest( + model=model, + inputs=utils.get_pydantic_model(inputs, models.Inputs), + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_classifications_v1_chat_classifications_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ClassificationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py new file mode 100644 index 00000000..9caf4221 --- /dev/null +++ b/src/mistralai/client/conversations.py @@ -0,0 +1,2657 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + + from .basesdk import BaseSDK + from mistralai.client import models, utils + from mistralai.client._hooks import HookContext + from mistralai.client.models import ( + completionargs as models_completionargs, + conversationappendrequest as models_conversationappendrequest, + conversationappendstreamrequest as models_conversationappendstreamrequest, + conversationinputs as models_conversationinputs, + conversationrequest as models_conversationrequest, + conversationrestartrequest as models_conversationrestartrequest, + conversationrestartstreamrequest as models_conversationrestartstreamrequest, + conversationstreamrequest as models_conversationstreamrequest, + ) + from mistralai.client.types import OptionalNullable, UNSET + from mistralai.client.utils import eventstreaming, get_security_from_env + from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response + from typing import Any, Dict, List, Mapping, Optional, Union + + + class Conversations(BaseSDK): + r"""(beta) Conversations API""" + + def start( + self, + *, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationrequest.HandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_conversationrequest.Tools], + List[models_conversationrequest.ToolsTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrequest.AgentVersion, + models_conversationrequest.AgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation using a base model or an agent, and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests.
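
A minimal sketch of the flow this docstring describes: start a conversation against a base model, keep the returned conversation_id, and continue the exchange later. The beta namespace mirrors the operations listed in this changeset; the model ID is illustrative.

```python
# Sketch only; assumes MISTRAL_API_KEY is set. An agent_id (optionally with
# agent_version) can be passed instead of a base model.
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

res = client.beta.conversations.start(
    model="mistral-small-latest",  # illustrative model ID
    inputs="What is the capital of France?",
)
# the conversation_id is the handle for continuing this conversation
print(res.conversation_id)
```
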
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def start_async( + self, + *, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationrequest.HandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_conversationrequest.Tools], + List[models_conversationrequest.ToolsTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: 
OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrequest.AgentVersion, + models_conversationrequest.AgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation using a base model or an agent, and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.ResponseBody]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + metadata=metadata, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.ResponseBody], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.ResponseBody]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + metadata=metadata, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.ResponseBody], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: 
Optional[Mapping[str, str]] = None, + ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + r"""Retrieve conversation information. + + Given a conversation_id, retrieve a conversation entity with its attributes. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res)
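+ # Usage sketch (illustrative, not generated code): fetching a conversation
+ # synchronously. Assumes a configured Mistral client bound to `mistral`;
+ # the conversation ID is a placeholder.
+ #
+ #     conversation = mistral.beta.conversations.get(
+ #         conversation_id="conv_123",
+ #     )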
+ async def get_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + r"""Retrieve conversation information. + + Given a conversation_id, retrieve a conversation entity with its attributes. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res)
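+ # Usage sketch (illustrative): the async variant is identical except that
+ # it is awaited, e.g.
+ #
+ #     conversation = await mistral.beta.conversations.get_async(
+ #         conversation_id="conv_123",
+ #     )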
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def append( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationappendrequest.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. 
+ :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def append_async( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationappendrequest.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + 
models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the newly created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether or not to store the results on our servers. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res)
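+ # Usage sketch (illustrative): appending entries to an existing
+ # conversation. The plain-string form of `inputs` is an assumption based on
+ # the ConversationInputs union; the IDs are placeholders.
+ #
+ #     response = mistral.beta.conversations.append(
+ #         conversation_id="conv_123",
+ #         inputs="And what about the follow-up question?",
+ #     )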
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get_history( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + + :param conversation_id: ID of the conversation from which we are fetching entries. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationHistory, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_history_async( + self, + *, + conversation_id: str, + retries: 
+ async def get_history_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id, retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended; these can be messages, connectors, or function_call entries. + + :param conversation_id: ID of the conversation from which we are fetching entries. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationHistory, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res)
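+ # Usage sketch (illustrative): the async variant mirrors the sync call, e.g.
+ #
+ #     history = await mistral.beta.conversations.get_history_async(
+ #         conversation_id="conv_123",
+ #     )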
+ def get_messages( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationMessages: + r"""Retrieve all messages in a conversation. + + Given a conversation_id, retrieve all the messages belonging to that conversation. This is similar to retrieving all entries, except only messages are returned. + + :param conversation_id: ID of the conversation from which we are fetching messages. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationMessages, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_messages_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationMessages: + r"""Retrieve all messages in a conversation. + + Given a conversation_id, retrieve all the messages belonging to that conversation. This is similar to retrieving all entries, except only messages are returned. + + :param conversation_id: ID of the conversation from which we are fetching messages. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationMessages, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def restart( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + from_entry_id: str, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationrestartrequest.ConversationRestartRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartrequest.ConversationRestartRequestAgentVersion, + models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + 
http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an entry id, recreate the conversation from that point and run completion. A new conversation with the newly created entries is returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether or not to store the results on our servers. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res)
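+ # Usage sketch (illustrative): restarting from an earlier entry while
+ # pinning an agent version; the IDs and the version value are placeholders.
+ #
+ #     response = mistral.beta.conversations.restart(
+ #         conversation_id="conv_123",
+ #         from_entry_id="entry_456",
+ #         inputs="Answer again, more concisely.",
+ #         agent_version=1,
+ #     )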
"4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def restart_async( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + from_entry_id: str, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationrestartrequest.ConversationRestartRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartrequest.ConversationRestartRequestAgentVersion, + models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def start_stream( + self, + *, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = True, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationstreamrequest.ConversationStreamRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[ + models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + ], + ] + ] = None, + completion_args: OptionalNullable[ + Union[ + models_completionargs.CompletionArgs, + 
models_completionargs.CompletionArgsTypedDict, + ] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationstreamrequest.ConversationStreamRequestAgentVersion, + models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation using a base model or an agent, and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationStreamRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text)
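+ # Usage sketch (illustrative): the returned EventStream is iterable and is
+ # typically used as a context manager so the underlying connection is
+ # closed; the agent ID is a placeholder.
+ #
+ #     with mistral.beta.conversations.start_stream(
+ #         agent_id="ag_123",
+ #         inputs="Hello!",
+ #     ) as events:
+ #         for event in events:
+ #             print(event)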
+ async def start_stream_async( + self, + *, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = True, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationstreamrequest.ConversationStreamRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[ + models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + ], + ] + ] = None, + completion_args: OptionalNullable[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationstreamrequest.ConversationStreamRequestAgentVersion, + models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation using a base model or an agent, and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. 
+ :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationStreamRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", 
            http_res, http_res_text)
+
+    def append_stream(
+        self,
+        *,
+        conversation_id: str,
+        inputs: Union[
+            models_conversationinputs.ConversationInputs,
+            models_conversationinputs.ConversationInputsTypedDict,
+        ],
+        stream: Optional[bool] = True,
+        store: Optional[bool] = True,
+        handoff_execution: Optional[
+            models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution
+        ] = "server",
+        completion_args: Optional[
+            Union[
+                models_completionargs.CompletionArgs,
+                models_completionargs.CompletionArgsTypedDict,
+            ]
+        ] = None,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> eventstreaming.EventStream[models.ConversationEvents]:
+        r"""Append new entries to an existing conversation.
+
+        Runs completion on the conversation history plus the new user entries and returns the newly created entries.
+
+        :param conversation_id: ID of the conversation to which the entries are appended.
+        :param inputs:
+        :param stream:
+        :param store: Whether to store the results on our servers or not.
+        :param handoff_execution:
+        :param completion_args: White-listed arguments from the completion API
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.AgentsAPIV1ConversationsAppendStreamRequest(
+            conversation_id=conversation_id,
+            conversation_append_stream_request=models.ConversationAppendStreamRequest(
+                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
+                stream=stream,
+                store=store,
+                handoff_execution=handoff_execution,
+                completion_args=utils.get_pydantic_model(
+                    completion_args, Optional[models.CompletionArgs]
+                ),
+            ),
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v1/conversations/{conversation_id}#stream",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="text/event-stream",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.conversation_append_stream_request,
+                False,
+                False,
+                "json",
+                models.ConversationAppendStreamRequest,
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="agents_api_v1_conversations_append_stream",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStream(
+                http_res,
+                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "422", "application/json"):
+            http_res_text = utils.stream_to_text(http_res)
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
+            )
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        http_res_text = utils.stream_to_text(http_res)
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
+
+    async def append_stream_async(
+        self,
+        *,
+        conversation_id: str,
+        inputs: Union[
+            models_conversationinputs.ConversationInputs,
+            models_conversationinputs.ConversationInputsTypedDict,
+        ],
+        stream: Optional[bool] = True,
+        store: Optional[bool] = True,
+        handoff_execution: Optional[
+            models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution
+        ] = "server",
+        completion_args: Optional[
+            Union[
+                models_completionargs.CompletionArgs,
+                models_completionargs.CompletionArgsTypedDict,
+            ]
+        ] = None,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]:
+        r"""Append new entries to an existing conversation.
+
+        Runs completion on the conversation history plus the new user entries and returns the newly created entries.
+
+        :param conversation_id: ID of the conversation to which the entries are appended.
+        :param inputs:
+        :param stream:
+        :param store: Whether to store the results on our servers or not.
+        :param handoff_execution:
+        :param completion_args: White-listed arguments from the completion API
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
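+
+        A minimal usage sketch (illustrative only; assumes an existing
+        conversation id, an API key in the MISTRAL_API_KEY environment
+        variable, and that this code runs inside an async function):
+
+        ```python
+        import os
+        from mistralai import Mistral
+
+        client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+        events = await client.beta.conversations.append_stream_async(
+            conversation_id="conv_...",
+            inputs="Summarize the discussion so far.",
+        )
+        # Iterate over server-sent events as they arrive.
+        async for event in events:
+            print(event)
+        ```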
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + def restart_stream( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + from_entry_id: str, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + 
models_completionargs.CompletionArgsTypedDict,
+            ]
+        ] = None,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
+        agent_version: OptionalNullable[
+            Union[
+                models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion,
+                models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict,
+            ]
+        ] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> eventstreaming.EventStream[models.ConversationEvents]:
+        r"""Restart a conversation starting from a given entry.
+
+        Given a conversation_id and an entry id, recreate the conversation from that point and run completion. A new conversation is returned together with the newly created entries.
+
+        :param conversation_id: ID of the original conversation being restarted.
+        :param inputs:
+        :param from_entry_id:
+        :param stream:
+        :param store: Whether to store the results on our servers or not.
+        :param handoff_execution:
+        :param completion_args: White-listed arguments from the completion API
+        :param metadata: Custom metadata for the conversation.
+        :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.AgentsAPIV1ConversationsRestartStreamRequest(
+            conversation_id=conversation_id,
+            conversation_restart_stream_request=models.ConversationRestartStreamRequest(
+                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
+                stream=stream,
+                store=store,
+                handoff_execution=handoff_execution,
+                completion_args=utils.get_pydantic_model(
+                    completion_args, Optional[models.CompletionArgs]
+                ),
+                metadata=metadata,
+                from_entry_id=from_entry_id,
+                agent_version=agent_version,
+            ),
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v1/conversations/{conversation_id}/restart#stream",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="text/event-stream",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.conversation_restart_stream_request,
+                False,
+                False,
+                "json",
+                models.ConversationRestartStreamRequest,
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="agents_api_v1_conversations_restart_stream",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            stream=True,
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "200", "text/event-stream"):
+            return eventstreaming.EventStream(
+                http_res,
+                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
+                client_ref=self,
+            )
+        if utils.match_response(http_res, "422", "application/json"):
+            http_res_text = utils.stream_to_text(http_res)
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res, http_res_text
+            )
+            raise models.HTTPValidationError(response_data, http_res, http_res_text)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        http_res_text = utils.stream_to_text(http_res)
+        raise models.SDKError("Unexpected response received", http_res, http_res_text)
+
+    async def restart_stream_async(
+        self,
+        *,
+        conversation_id: str,
+        inputs: Union[
+            models_conversationinputs.ConversationInputs,
+            models_conversationinputs.ConversationInputsTypedDict,
+        ],
+        from_entry_id: str,
+        stream: Optional[bool] = True,
+        store: Optional[bool] = True,
+        handoff_execution: Optional[
+            models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution
+        ] = "server",
+        completion_args: Optional[
+            Union[
+                models_completionargs.CompletionArgs,
+                models_completionargs.CompletionArgsTypedDict,
+            ]
+        ] = None,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
+        agent_version: OptionalNullable[
+            Union[
+                models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion,
+                models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict,
+            ]
+        ] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]:
+        r"""Restart a conversation starting from a given entry.
+
+        Given a conversation_id and an entry id, recreate the conversation from that point and run completion. A new conversation is returned together with the newly created entries.
+
+        :param conversation_id: ID of the original conversation being restarted.
+        :param inputs:
+        :param from_entry_id:
+        :param stream:
+        :param store: Whether to store the results on our servers or not.
+        :param handoff_execution:
+        :param completion_args: White-listed arguments from the completion API
+        :param metadata: Custom metadata for the conversation.
+        :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
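+
+        A minimal usage sketch (illustrative only; the conversation and entry
+        ids are placeholders, and this code is assumed to run inside an async
+        function with MISTRAL_API_KEY set):
+
+        ```python
+        import os
+        from mistralai import Mistral
+
+        client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+        # Branch off from an earlier entry and stream the new completion.
+        events = await client.beta.conversations.restart_stream_async(
+            conversation_id="conv_...",
+            from_entry_id="entry_...",
+            inputs="Let's explore the other option instead.",
+        )
+        async for event in events:
+            print(event)
+        ```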
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py new file mode 100644 index 00000000..009a604f --- /dev/null +++ b/src/mistralai/client/documents.py @@ -0,0 +1,1981 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
+
+from .basesdk import BaseSDK
+from mistralai.client import models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.models import (
+    documentupdatein as models_documentupdatein,
+    file as models_file,
+)
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Dict, Mapping, Optional, Union
+
+
+class Documents(BaseSDK):
+    r"""(beta) Libraries API - manage documents in a library."""
+
+    def list(
+        self,
+        *,
+        library_id: str,
+        search: OptionalNullable[str] = UNSET,
+        page_size: Optional[int] = 100,
+        page: Optional[int] = 0,
+        filters_attributes: OptionalNullable[str] = UNSET,
+        sort_by: Optional[str] = "created_at",
+        sort_order: Optional[str] = "desc",
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.ListDocumentOut:
+        r"""List documents in a given library.
+
+        Given a library, lists the documents that have been uploaded to that library.
+
+        :param library_id:
+        :param search:
+        :param page_size:
+        :param page:
+        :param filters_attributes:
+        :param sort_by:
+        :param sort_order:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.LibrariesDocumentsListV1Request(
+            library_id=library_id,
+            search=search,
+            page_size=page_size,
+            page=page,
+            filters_attributes=filters_attributes,
+            sort_by=sort_by,
+            sort_order=sort_order,
+        )
+
+        req = self._build_request(
+            method="GET",
+            path="/v1/libraries/{library_id}/documents",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="libraries_documents_list_v1",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.ListDocumentOut, http_res)
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
+            )
+            raise models.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
+    async def list_async(
+        self,
+        *,
+        library_id: str,
+        search: OptionalNullable[str] = UNSET,
+        page_size: Optional[int] = 100,
+        page: Optional[int] = 0,
+        filters_attributes: OptionalNullable[str] = UNSET,
+        sort_by: Optional[str] = "created_at",
+        sort_order: Optional[str] = "desc",
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.ListDocumentOut:
+        r"""List documents in a given library.
+
+        Given a library, lists the documents that have been uploaded to that library.
+
+        :param library_id:
+        :param search:
+        :param page_size:
+        :param page:
+        :param filters_attributes:
+        :param sort_by:
+        :param sort_order:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.LibrariesDocumentsListV1Request(
+            library_id=library_id,
+            search=search,
+            page_size=page_size,
+            page=page,
+            filters_attributes=filters_attributes,
+            sort_by=sort_by,
+            sort_order=sort_order,
+        )
+
+        req = self._build_request_async(
+            method="GET",
+            path="/v1/libraries/{library_id}/documents",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="libraries_documents_list_v1",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.ListDocumentOut, http_res)
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
+            )
+            raise models.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
"4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def upload( + self, + *, + library_id: str, + file: Union[models_file.File, models_file.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Upload a new document. + + Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search + + :param library_id: + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUploadV1Request( + library_id=library_id, + request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + file=utils.get_pydantic_model(file, models.File), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.request_body, + False, + False, + "multipart", + models.LibrariesDocumentsUploadV1DocumentUpload, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_upload_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, ["200", "201"], "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, 
"422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def upload_async( + self, + *, + library_id: str, + file: Union[models_file.File, models_file.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Upload a new document. + + Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search + + :param library_id: + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUploadV1Request( + library_id=library_id, + request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + file=utils.get_pydantic_model(file, models.File), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.request_body, + False, + False, + "multipart", + models.LibrariesDocumentsUploadV1DocumentUpload, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_upload_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, ["200", "201"], "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Retrieve the metadata of a specific document. + + Given a library and a document in this library, you can retrieve the metadata of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Retrieve the metadata of a specific document. + + Given a library and a document in this library, you can retrieve the metadata of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + library_id: str, + document_id: str, + name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[ + Dict[str, models_documentupdatein.Attributes], + Dict[str, models_documentupdatein.AttributesTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Update the metadata of a specific document. + + Given a library and a document in that library, update the name of that document. + + :param library_id: + :param document_id: + :param name: + :param attributes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUpdateV1Request( + library_id=library_id, + document_id=document_id, + document_update_in=models.DocumentUpdateIn( + name=name, + attributes=attributes, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.document_update_in, + False, + False, + "json", + models.DocumentUpdateIn, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + library_id: str, + document_id: str, + name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[ + Dict[str, models_documentupdatein.Attributes], + Dict[str, models_documentupdatein.AttributesTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Update the metadata of a specific document. + + Given a library and a document in that library, update the name of that document. + + :param library_id: + :param document_id: + :param name: + :param attributes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUpdateV1Request( + library_id=library_id, + document_id=document_id, + document_update_in=models.DocumentUpdateIn( + name=name, + attributes=attributes, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.document_update_in, + False, + False, + "json", + models.DocumentUpdateIn, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a document. + + Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsDeleteV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a document. + + Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsDeleteV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def text_content( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentTextContent: + r"""Retrieve the text content of a specific document. + + Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetTextContentV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/text_content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_text_content_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentTextContent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def text_content_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentTextContent: + r"""Retrieve the text content of a specific document. + + Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetTextContentV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/text_content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_text_content_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentTextContent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def status( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ProcessingStatusOut: + r"""Retrieve the processing status of a specific document. + + Given a library and a document in that library, retrieve the processing status of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetStatusV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_status_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ProcessingStatusOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def status_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ProcessingStatusOut: + r"""Retrieve the processing status of a specific document. + + Given a library and a document in that library, retrieve the processing status of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetStatusV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_status_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ProcessingStatusOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get_signed_url( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of a specific document. + + Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_signed_url_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of a specific document. + + Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def extracted_text_signed_url( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of text extracted from a given document. + + Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def extracted_text_signed_url_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of text extracted from a given document. + + Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def reprocess( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Reprocess a document. + + Given a library and a document in that library, reprocess that document, it will be billed again. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsReprocessV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="POST", + path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_reprocess_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def reprocess_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Reprocess a document. + + Given a library and a document in that library, reprocess that document, it will be billed again. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsReprocessV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_reprocess_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py new file mode 100644 index 00000000..359f2f62 --- /dev/null +++ b/src/mistralai/client/embeddings.py @@ -0,0 +1,240 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from mistralai.client import models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.models import (
+ embeddingdtype as models_embeddingdtype,
+ embeddingrequest as models_embeddingrequest,
+ encodingformat as models_encodingformat,
+)
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Dict, Mapping, Optional, Union
+
+
+class Embeddings(BaseSDK):
+ r"""Embeddings API."""
+
+ def create(
+ self,
+ *,
+ model: str,
+ inputs: Union[
+ models_embeddingrequest.EmbeddingRequestInputs,
+ models_embeddingrequest.EmbeddingRequestInputsTypedDict,
+ ],
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
+ output_dimension: OptionalNullable[int] = UNSET,
+ output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None,
+ encoding_format: Optional[models_encodingformat.EncodingFormat] = None,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.EmbeddingResponse:
+ r"""Embeddings
+
+ Create embeddings for the given inputs.
+
+ :param model: The ID of the model to be used for embedding.
+ :param inputs: The text content to be embedded; it can be a string or an array of strings for fast bulk processing.
+ :param metadata:
+ :param output_dimension: The dimension of the output embeddings when the feature is available. If not provided, a default output dimension will be used.
+ :param output_dtype:
+ :param encoding_format:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
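+
+ Example (a minimal sketch; it assumes this SDK is mounted at ``client.embeddings``, and ``mistral-embed`` stands in for any embedding model):
+ ```python
+ import os
+ from mistralai import Mistral
+
+ with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
+     res = client.embeddings.create(
+         model="mistral-embed",
+         inputs=["Embed this sentence.", "And this one."],
+     )
+     print(len(res.data))  # one embedding per input
+ ```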
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.EmbeddingRequest( + model=model, + metadata=metadata, + inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, + encoding_format=encoding_format, + ) + + req = self._build_request( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.EmbeddingResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + inputs: Union[ + models_embeddingrequest.EmbeddingRequestInputs, + models_embeddingrequest.EmbeddingRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, + encoding_format: Optional[models_encodingformat.EncodingFormat] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.EmbeddingResponse: + r"""Embeddings + + Embeddings + + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param metadata: + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
+ :param output_dtype: + :param encoding_format: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.EmbeddingRequest( + model=model, + metadata=metadata, + inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, + encoding_format=encoding_format, + ) + + req = self._build_request_async( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.EmbeddingResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py new file mode 100644 index 00000000..97817eab --- /dev/null +++ b/src/mistralai/client/files.py @@ -0,0 +1,1120 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +import httpx +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + file as models_file, + filepurpose as models_filepurpose, + sampletype as models_sampletype, + source as models_source, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class Files(BaseSDK): + r"""Files API""" + + def upload( + self, + *, + file: Union[models_file.File, models_file.FileTypedDict], + purpose: Optional[models_filepurpose.FilePurpose] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UploadFileOut: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param purpose: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
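+
+ Example (a minimal sketch; it assumes this SDK is mounted at ``client.files``, that ``training.jsonl`` exists locally, and that the ``file`` dict shape matches the ``File`` model):
+ ```python
+ import os
+ from mistralai import Mistral
+
+ with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
+     with open("training.jsonl", "rb") as f:
+         uploaded = client.files.upload(
+             file={"file_name": "training.jsonl", "content": f},
+             purpose="fine-tune",
+         )
+     print(uploaded.id)
+ ```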
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + purpose=purpose, + file=utils.get_pydantic_model(file, models.File), + ) + + req = self._build_request( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.FilesAPIRoutesUploadFileMultiPartBodyParams, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_upload_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UploadFileOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def upload_async( + self, + *, + file: Union[models_file.File, models_file.FileTypedDict], + purpose: Optional[models_filepurpose.FilePurpose] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UploadFileOut: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param purpose: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + purpose=purpose, + file=utils.get_pydantic_model(file, models.File), + ) + + req = self._build_request_async( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.FilesAPIRoutesUploadFileMultiPartBodyParams, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_upload_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UploadFileOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + include_total: Optional[bool] = True, + sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, + source: OptionalNullable[List[models_source.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFilesOut: + r"""List Files + + Returns a list of files that belong to the user's organization. + + :param page: + :param page_size: + :param include_total: + :param sample_type: + :param source: + :param search: + :param purpose: + :param mimetypes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + include_total=include_total, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + mimetypes=mimetypes, + ) + + req = self._build_request( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_list_files", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFilesOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + include_total: Optional[bool] = True, + sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, + source: OptionalNullable[List[models_source.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFilesOut: + r"""List Files + + Returns a list of files that belong to the user's organization. + + :param page: + :param page_size: + :param include_total: + :param sample_type: + :param source: + :param search: + :param purpose: + :param mimetypes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + include_total=include_total, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + mimetypes=mimetypes, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_list_files", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFilesOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def retrieve( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.RetrieveFileOut: + r"""Retrieve File + + Returns information about a specific file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesRetrieveFileRequest( + file_id=file_id, + ) + + req = self._build_request( + method="GET", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_retrieve_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.RetrieveFileOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def retrieve_async( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.RetrieveFileOut: + r"""Retrieve File + + Returns information about a specific file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesRetrieveFileRequest( + file_id=file_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_retrieve_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.RetrieveFileOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteFileOut: + r"""Delete File + + Delete a file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesDeleteFileRequest( + file_id=file_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_delete_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteFileOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteFileOut: + r"""Delete File + + Delete a file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesDeleteFileRequest( + file_id=file_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_delete_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteFileOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def download( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> httpx.Response: + r"""Download File + + Download a file + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesDownloadFileRequest( + file_id=file_id, + ) + + req = self._build_request( + method="GET", + path="/v1/files/{file_id}/content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/octet-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_download_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/octet-stream"): + return http_res + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def download_async( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> httpx.Response: + r"""Download File + + Download a file + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesDownloadFileRequest( + file_id=file_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files/{file_id}/content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/octet-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_download_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/octet-stream"): + return http_res + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + def get_signed_url( + self, + *, + file_id: str, + expiry: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FileSignedURL: + r"""Get Signed Url + + :param file_id: + :param expiry: Number of hours before the url becomes invalid. Defaults to 24h + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesGetSignedURLRequest( + file_id=file_id, + expiry=expiry, + ) + + req = self._build_request( + method="GET", + path="/v1/files/{file_id}/url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_get_signed_url", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FileSignedURL, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_signed_url_async( + self, + *, + file_id: str, + expiry: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FileSignedURL: + r"""Get Signed Url + + :param file_id: + :param expiry: Number of hours before the url becomes invalid. Defaults to 24h + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesGetSignedURLRequest( + file_id=file_id, + expiry=expiry, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files/{file_id}/url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_get_signed_url", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FileSignedURL, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/fim.py b/src/mistralai/client/fim.py new file mode 100644 index 00000000..4a834fe9 --- /dev/null +++ b/src/mistralai/client/fim.py @@ -0,0 +1,545 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + fimcompletionrequest as models_fimcompletionrequest, + fimcompletionstreamrequest as models_fimcompletionstreamrequest, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Fim(BaseSDK): + r"""Fill-in-the-middle API.""" + + def complete( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models_fimcompletionrequest.FIMCompletionRequestStop, + models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.FIMCompletionResponse: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FIMCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models_fimcompletionrequest.FIMCompletionRequestStop, + models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + 
http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.FIMCompletionResponse:
+        r"""Fim Completion
+
+        FIM completion.
+
+        :param model: ID of the model with FIM to use.
+        :param prompt: The text/code to complete.
+        :param temperature: What sampling temperature to use; we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
+        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
+        :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
+        :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.
+        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
+        :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix`, the model will fill in what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
+        :param min_tokens: The minimum number of tokens to generate in the completion.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
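+
+        Example (an illustrative sketch, not generated code; assumes this
+        class is mounted as ``client.fim`` and that ``codestral-latest`` is an
+        available FIM model)::
+
+            async with Mistral(api_key="YOUR_API_KEY") as client:
+                res = await client.fim.complete_async(
+                    model="codestral-latest",
+                    prompt="def fibonacci(n: int) -> int:",
+                    suffix="return fibonacci(n - 1) + fibonacci(n - 2)",
+                )
+                print(res.choices[0].message.content)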
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.FIMCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, + models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
+
+        :param model: ID of the model with FIM to use.
+        :param prompt: The text/code to complete.
+        :param temperature: What sampling temperature to use; we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
+        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
+        :param stream:
+        :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.
+        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
+        :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix`, the model will fill in what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
+        :param min_tokens: The minimum number of tokens to generate in the completion.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
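+
+        Example (an illustrative sketch; the returned ``EventStream`` is a
+        context manager yielding ``CompletionEvent`` objects, whose chunk
+        fields follow the usual completion-delta shape)::
+
+            with Mistral(api_key="YOUR_API_KEY") as client:
+                with client.fim.stream(
+                    model="codestral-latest",
+                    prompt="def fibonacci(n: int) -> int:",
+                ) as event_stream:
+                    for event in event_stream:
+                        print(event.data.choices[0].delta.content, end="")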
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + prompt: str, + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, + models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> 
eventstreaming.EventStreamAsync[models.CompletionEvent]:
+        r"""Stream fim completion
+
+        Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
+
+        :param model: ID of the model with FIM to use.
+        :param prompt: The text/code to complete.
+        :param temperature: What sampling temperature to use; we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
+        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
+        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
+        :param stream:
+        :param stop: Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.
+        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
+        :param metadata:
+        :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix`, the model will fill in what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
+        :param min_tokens: The minimum number of tokens to generate in the completion.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
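+
+        Async example (an illustrative sketch; the awaited call returns an
+        ``EventStreamAsync`` usable with ``async with`` / ``async for``)::
+
+            async with Mistral(api_key="YOUR_API_KEY") as client:
+                res = await client.fim.stream_async(
+                    model="codestral-latest",
+                    prompt="def fibonacci(n: int) -> int:",
+                )
+                async with res as event_stream:
+                    async for event in event_stream:
+                        print(event.data.choices[0].delta.content, end="")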
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/fine_tuning.py b/src/mistralai/client/fine_tuning.py new file mode 100644 index 00000000..c57425fd --- /dev/null +++ b/src/mistralai/client/fine_tuning.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.jobs import Jobs +from typing import Optional + + +class FineTuning(BaseSDK): + jobs: Jobs + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/httpclient.py b/src/mistralai/client/httpclient.py new file mode 100644 index 00000000..89560b56 --- /dev/null +++ b/src/mistralai/client/httpclient.py @@ -0,0 +1,125 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +# pyright: reportReturnType = false +import asyncio +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + def close(self) -> None: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + async def aclose(self) -> None: + pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK 
so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + try: + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) + except RuntimeError: + try: + asyncio.run(async_client.aclose()) + except RuntimeError: + # best effort + pass diff --git a/src/mistralai/client/jobs.py b/src/mistralai/client/jobs.py new file mode 100644 index 00000000..848926ea --- /dev/null +++ b/src/mistralai/client/jobs.py @@ -0,0 +1,1067 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + classifiertargetin as models_classifiertargetin, + finetuneablemodeltype as models_finetuneablemodeltype, + jobin as models_jobin, + jobs_api_routes_fine_tuning_get_fine_tuning_jobsop as models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop, + trainingfile as models_trainingfile, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class Jobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[ + models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus + ] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsOut: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_before=created_before, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ) + + req = self._build_request( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.JobsOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[ + models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus + ] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsOut: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. 
When set, the other results are not displayed.
+        :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed.
+        :param created_before:
+        :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed.
+        :param status: The current job state to filter on. When set, the other results are not displayed.
+        :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed.
+        :param wandb_name: The Weights and Biases run name to filter on. When set, the other results are not displayed.
+        :param suffix: The model suffix to filter on. When set, the other results are not displayed.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest(
+            page=page,
+            page_size=page_size,
+            model=model,
+            created_after=created_after,
+            created_before=created_before,
+            created_by_me=created_by_me,
+            status=status,
+            wandb_project=wandb_project,
+            wandb_name=wandb_name,
+            suffix=suffix,
+        )
+
+        req = self._build_request_async(
+            method="GET",
+            path="/v1/fine_tuning/jobs",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.JobsOut, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
+    def create(
+        self,
+        *,
+        model: str,
+        hyperparameters: Union[
+            models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict
+        ],
+        training_files: Optional[
+            Union[
+                List[models_trainingfile.TrainingFile],
+                List[models_trainingfile.TrainingFileTypedDict],
+            ]
+        ] = None,
+        validation_files: OptionalNullable[List[str]] = UNSET,
+        suffix: OptionalNullable[str] = UNSET,
+        integrations: OptionalNullable[
+            Union[
+                List[models_jobin.JobInIntegrations],
+                List[models_jobin.JobInIntegrationsTypedDict],
+            ]
+        ] = UNSET,
+        auto_start: Optional[bool] = None,
+        invalid_sample_skip_percentage: Optional[float] = 0,
+        job_type: OptionalNullable[
+            models_finetuneablemodeltype.FineTuneableModelType
+        ] = UNSET,
+        repositories: OptionalNullable[
+            Union[
+                List[models_jobin.JobInRepositories],
+                List[models_jobin.JobInRepositoriesTypedDict],
+            ]
+        ] = UNSET,
+        classifier_targets: OptionalNullable[
+            Union[
+                List[models_classifiertargetin.ClassifierTargetIn],
+                List[models_classifiertargetin.ClassifierTargetInTypedDict],
+            ]
+        ] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse:
+        r"""Create Fine Tuning Job
+
+        Create a new fine-tuning job; it will be queued for processing.
+
+        :param model: The name of the model to fine-tune.
+        :param hyperparameters:
+        :param training_files:
+        :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.
+        :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`
+        :param integrations: A list of integrations to enable for your fine-tuning job.
+        :param auto_start: This field will be required in a future release.
+        :param invalid_sample_skip_percentage:
+        :param job_type:
+        :param repositories:
+        :param classifier_targets:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
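+
+        Example (an illustrative sketch, not generated code; assumes this
+        class is mounted as ``client.fine_tuning.jobs``, and the
+        hyperparameter and training-file fields shown are typical, not
+        exhaustive)::
+
+            with Mistral(api_key="YOUR_API_KEY") as client:
+                job = client.fine_tuning.jobs.create(
+                    model="open-mistral-7b",
+                    hyperparameters={
+                        "training_steps": 10,
+                        "learning_rate": 0.0001,
+                    },
+                    training_files=[{"file_id": "FILE_ID", "weight": 1}],
+                    auto_start=False,
+                )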
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobIn( + model=model, + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), + validation_files=validation_files, + suffix=suffix, + integrations=utils.get_pydantic_model( + integrations, OptionalNullable[List[models.JobInIntegrations]] + ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), + repositories=utils.get_pydantic_model( + repositories, OptionalNullable[List[models.JobInRepositories]] + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] + ), + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.JobIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + hyperparameters: Union[ + models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict + ], + training_files: Optional[ + Union[ + List[models_trainingfile.TrainingFile], + List[models_trainingfile.TrainingFileTypedDict], + ] + ] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[ + Union[ + List[models_jobin.JobInIntegrations], + List[models_jobin.JobInIntegrationsTypedDict], + ] + ] = UNSET, + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[ + models_finetuneablemodeltype.FineTuneableModelType + ] = UNSET, + repositories: OptionalNullable[ 
+ Union[
+ List[models_jobin.JobInRepositories],
+ List[models_jobin.JobInRepositoriesTypedDict],
+ ]
+ ] = UNSET,
+ classifier_targets: OptionalNullable[
+ Union[
+ List[models_classifiertargetin.ClassifierTargetIn],
+ List[models_classifiertargetin.ClassifierTargetInTypedDict],
+ ]
+ ] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse:
+ r"""Create Fine Tuning Job
+
+ Create a new fine-tuning job; it will be queued for processing.
+
+ :param model: The name of the model to fine-tune.
+ :param hyperparameters:
+ :param training_files:
+ :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.
+ :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`
+ :param integrations: A list of integrations to enable for your fine-tuning job.
+ :param auto_start: This field will be required in a future release.
+ :param invalid_sample_skip_percentage:
+ :param job_type:
+ :param repositories:
+ :param classifier_targets:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
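+
+ Example (an illustrative sketch; it assumes this operation is exposed on the
+ client as `client.fine_tuning.jobs` and that the `MISTRAL_API_KEY`
+ environment variable is set; file IDs and hyperparameter values are placeholders):
+
+ .. code-block:: python
+
+ import asyncio
+ import os
+
+ from mistralai import Mistral
+
+ async def main():
+ client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+ # Queue the job without starting it (auto_start=False); it can be
+ # reviewed and then launched later via jobs.start().
+ job = await client.fine_tuning.jobs.create_async(
+ model="open-mistral-7b",
+ training_files=[{"file_id": "<uploaded-file-id>", "weight": 1}],
+ hyperparameters={"learning_rate": 0.0001},
+ auto_start=False,
+ )
+ print(job)
+
+ asyncio.run(main())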
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobIn( + model=model, + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), + validation_files=validation_files, + suffix=suffix, + integrations=utils.get_pydantic_model( + integrations, OptionalNullable[List[models.JobInIntegrations]] + ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), + repositories=utils.get_pydantic_model( + repositories, OptionalNullable[List[models.JobInRepositories]] + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.JobIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def cancel_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def start( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def start_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/libraries.py b/src/mistralai/client/libraries.py new file mode 100644 index 00000000..03a54741 --- /dev/null +++ b/src/mistralai/client/libraries.py @@ -0,0 +1,946 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from .sdkconfiguration import SDKConfiguration
+from mistralai.client import models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.accesses import Accesses
+from mistralai.client.documents import Documents
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Mapping, Optional
+
+
+class Libraries(BaseSDK):
+ r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities."""
+
+ documents: Documents
+ r"""(beta) Libraries API - manage documents in a library."""
+ accesses: Accesses
+ r"""(beta) Libraries API - manage access to a library."""
+
+ def __init__(
+ self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
+ ) -> None:
+ BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref)
+ self.sdk_configuration = sdk_config
+ self._init_sdks()
+
+ def _init_sdks(self):
+ self.documents = Documents(self.sdk_configuration, parent_ref=self.parent_ref)
+ self.accesses = Accesses(self.sdk_configuration, parent_ref=self.parent_ref)
+
+ def list(
+ self,
+ *,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.ListLibraryOut:
+ r"""List all libraries you have access to.
+
+ List all libraries that you have created or that have been shared with you.
+
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
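+
+ Example (an illustrative sketch; it assumes this beta operation is exposed
+ on the client as `client.beta.libraries` and that the `MISTRAL_API_KEY`
+ environment variable is set):
+
+ .. code-block:: python
+
+ import os
+
+ from mistralai import Mistral
+
+ client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+ libraries = client.beta.libraries.list()
+ print(libraries)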
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request( + method="GET", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListLibraryOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListLibraryOut: + r"""List all libraries you have access to. + + List all libraries that you have created or have been shared with you. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request_async( + method="GET", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListLibraryOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + name: str, + description: OptionalNullable[str] = UNSET, + chunk_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Create a new Library. + + Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. + + :param name: + :param description: + :param chunk_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibraryIn( + name=name, + description=description, + chunk_size=chunk_size, + ) + + req = self._build_request( + method="POST", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.LibraryIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + name: str, + description: OptionalNullable[str] = UNSET, + chunk_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Create a new Library. + + Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. + + :param name: + :param description: + :param chunk_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibraryIn( + name=name, + description=description, + chunk_size=chunk_size, + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.LibraryIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Detailed information about a specific Library. + + Given a library id, details information about that Library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesGetV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Detailed information about a specific Library. + + Given a library id, details information about that Library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesGetV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Delete a library and all of it's document. + + Given a library id, deletes it together with all documents that have been uploaded to that library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDeleteV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Delete a library and all of it's document. + + Given a library id, deletes it together with all documents that have been uploaded to that library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDeleteV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + library_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Update a library. + + Given a library id, you can update the name and description. + + :param library_id: + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesUpdateV1Request( + library_id=library_id, + library_in_update=models.LibraryInUpdate( + name=name, + description=description, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.library_in_update, False, False, "json", models.LibraryInUpdate + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + library_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Update a library. + + Given a library id, you can update the name and description. + + :param library_id: + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesUpdateV1Request( + library_id=library_id, + library_in_update=models.LibraryInUpdate( + name=name, + description=description, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.library_in_update, False, False, "json", models.LibraryInUpdate + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/mistral_agents.py b/src/mistralai/client/mistral_agents.py new file mode 100644 index 00000000..2ac7a29e --- /dev/null +++ b/src/mistralai/client/mistral_agents.py @@ -0,0 +1,2080 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT."""
+
+from .basesdk import BaseSDK
+from mistralai.client import models, utils
+from mistralai.client._hooks import HookContext
+from mistralai.client.models import (
+ agentcreationrequest as models_agentcreationrequest,
+ agents_api_v1_agents_getop as models_agents_api_v1_agents_getop,
+ agentupdaterequest as models_agentupdaterequest,
+ completionargs as models_completionargs,
+ requestsource as models_requestsource,
+)
+from mistralai.client.types import OptionalNullable, UNSET
+from mistralai.client.utils import get_security_from_env
+from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
+from typing import Any, Dict, List, Mapping, Optional, Union
+
+
+class MistralAgents(BaseSDK):
+ r"""(beta) Agents API"""
+
+ def create(
+ self,
+ *,
+ model: str,
+ name: str,
+ instructions: OptionalNullable[str] = UNSET,
+ tools: Optional[
+ Union[
+ List[models_agentcreationrequest.AgentCreationRequestTools],
+ List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict],
+ ]
+ ] = None,
+ completion_args: Optional[
+ Union[
+ models_completionargs.CompletionArgs,
+ models_completionargs.CompletionArgsTypedDict,
+ ]
+ ] = None,
+ description: OptionalNullable[str] = UNSET,
+ handoffs: OptionalNullable[List[str]] = UNSET,
+ metadata: OptionalNullable[Dict[str, Any]] = UNSET,
+ retries: OptionalNullable[utils.RetryConfig] = UNSET,
+ server_url: Optional[str] = None,
+ timeout_ms: Optional[int] = None,
+ http_headers: Optional[Mapping[str, str]] = None,
+ ) -> models.Agent:
+ r"""Create an agent that can be used within a conversation.
+
+ Create a new agent, giving it instructions, tools, and a description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used.
+
+ :param model:
+ :param name:
+ :param instructions: Instruction prompt the model will follow during the conversation.
+ :param tools: List of tools which are available to the model during the conversation.
+ :param completion_args: White-listed arguments from the completion API
+ :param description:
+ :param handoffs:
+ :param metadata:
+ :param retries: Override the default retry configuration for this method
+ :param server_url: Override the default server URL for this method
+ :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+ :param http_headers: Additional headers to set or replace on requests.
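+
+ Example (an illustrative sketch; it assumes this beta operation is exposed
+ on the client as `client.beta.agents` and that the `MISTRAL_API_KEY`
+ environment variable is set; the model name and instructions are placeholders):
+
+ .. code-block:: python
+
+ import os
+
+ from mistralai import Mistral
+
+ client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+ agent = client.beta.agents.create(
+ model="mistral-medium-latest",
+ name="docs-assistant",
+ instructions="Answer questions using the indexed documentation.",
+ )
+ print(agent)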
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentCreationRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentCreationRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + metadata=metadata, + ) + + req = self._build_request( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentCreationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_agentcreationrequest.AgentCreationRequestTools], + List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create a agent that can be used within a conversation. + + Create a new agent giving it instructions, tools, description. 
The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentCreationRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentCreationRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + metadata=metadata, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentCreationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = 
UNSET, + sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. + + :param page: Page number (0-indexed) + :param page_size: Number of agents per page + :param deployment_chat: + :param sources: + :param name: + :param id: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + id=id, + metadata=metadata, + ) + + req = self._build_request( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, + name: OptionalNullable[str] = 
UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. + + :param page: Page number (0-indexed) + :param page_size: Number of agents per page + :param deployment_chat: + :param sources: + :param name: + :param id: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + id=id, + metadata=metadata, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + agent_id: str, + agent_version: OptionalNullable[ + Union[ + models_agents_api_v1_agents_getop.QueryParamAgentVersion, + models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: 
Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + + :param agent_id: + :param agent_version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + agent_version=agent_version, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + agent_id: str, + agent_version: OptionalNullable[ + Union[ + models_agents_api_v1_agents_getop.QueryParamAgentVersion, + models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. 
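+
+        Example (an illustrative sketch, assuming an initialized client named
+        ``mistral`` inside an async function; the agent id and alias are
+        placeholders):
+
+        .. code-block:: python
+
+            # agent_version accepts an integer version or a string alias.
+            agent = await mistral.beta.agents.get_async(
+                agent_id="ag_0123456789",
+                agent_version="production",
+            )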
+ + :param agent_id: + :param agent_version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + agent_version=agent_version, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + agent_id: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_agentupdaterequest.AgentUpdateRequestTools], + List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + model: OptionalNullable[str] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent entity. 
+
+        Update an agent's attributes and create a new version.
+
+        :param agent_id:
+        :param instructions: Instruction prompt the model will follow during the conversation.
+        :param tools: List of tools which are available to the model during the conversation.
+        :param completion_args: White-listed arguments from the completion API
+        :param model:
+        :param name:
+        :param description:
+        :param handoffs:
+        :param deployment_chat:
+        :param metadata:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.AgentsAPIV1AgentsUpdateRequest(
+            agent_id=agent_id,
+            agent_update_request=models.AgentUpdateRequest(
+                instructions=instructions,
+                tools=utils.get_pydantic_model(
+                    tools, Optional[List[models.AgentUpdateRequestTools]]
+                ),
+                completion_args=utils.get_pydantic_model(
+                    completion_args, Optional[models.CompletionArgs]
+                ),
+                model=model,
+                name=name,
+                description=description,
+                handoffs=handoffs,
+                deployment_chat=deployment_chat,
+                metadata=metadata,
+            ),
+        )
+
+        req = self._build_request(
+            method="PATCH",
+            path="/v1/agents/{agent_id}",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.agent_update_request,
+                False,
+                False,
+                "json",
+                models.AgentUpdateRequest,
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="agents_api_v1_agents_update",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["422", "4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        response_data: Any = None
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.Agent, http_res)
+        if utils.match_response(http_res, "422", "application/json"):
+            response_data = unmarshal_json_response(
+                models.HTTPValidationErrorData, http_res
+            )
+            raise models.HTTPValidationError(response_data, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
+    async def update_async(
+        self,
+        *,
+        agent_id: str,
+        instructions: OptionalNullable[str] = UNSET,
+        tools: Optional[
+            Union[
+                List[models_agentupdaterequest.AgentUpdateRequestTools],
+                List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict],
+            ]
+        ] = None,
+        completion_args: Optional[
+            Union[
+                models_completionargs.CompletionArgs,
+                models_completionargs.CompletionArgsTypedDict,
+            ]
+        ] = None,
+        model: OptionalNullable[str] = UNSET,
+        name: OptionalNullable[str] = UNSET,
+        description: OptionalNullable[str] = UNSET,
+        handoffs: OptionalNullable[List[str]] = UNSET,
+        deployment_chat: OptionalNullable[bool] = UNSET,
+        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.Agent:
+        r"""Update an agent entity.
+
+        Update an agent's attributes and create a new version.
+
+        :param agent_id:
+        :param instructions: Instruction prompt the model will follow during the conversation.
+        :param tools: List of tools which are available to the model during the conversation.
+        :param completion_args: White-listed arguments from the completion API
+        :param model:
+        :param name:
+        :param description:
+        :param handoffs:
+        :param deployment_chat:
+        :param metadata:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.AgentsAPIV1AgentsUpdateRequest(
+            agent_id=agent_id,
+            agent_update_request=models.AgentUpdateRequest(
+                instructions=instructions,
+                tools=utils.get_pydantic_model(
+                    tools, Optional[List[models.AgentUpdateRequestTools]]
+                ),
+                completion_args=utils.get_pydantic_model(
+                    completion_args, Optional[models.CompletionArgs]
+                ),
+                model=model,
+                name=name,
+                description=description,
+                handoffs=handoffs,
+                deployment_chat=deployment_chat,
+                metadata=metadata,
+            ),
+        )
+
+        req = self._build_request_async(
+            method="PATCH",
+            path="/v1/agents/{agent_id}",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request.agent_update_request,
+                False,
+                False,
+                "json",
+                models.AgentUpdateRequest,
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="agents_api_v1_agents_update",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+
request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise 
models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update_version( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
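+
+        Example (an illustrative sketch, assuming an initialized client named
+        ``mistral``; the agent id and version number are placeholders):
+
+        .. code-block:: python
+
+            # Make version 2 the active version of the agent.
+            agent = mistral.beta.agents.update_version(
+                agent_id="ag_0123456789",
+                version=2,
+            )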
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_version_async( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list_versions( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. + + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_versions_async( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. + + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get_version( + self, + *, + agent_id: str, + version: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve a specific version of an agent. + + Get a specific agent version by version number. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/versions/{version}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_version_async( + self, + *, + agent_id: str, + version: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve a specific version of an agent. + + Get a specific agent version by version number. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/versions/{version}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def create_version_alias( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. + + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_version_alias_async( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. + + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request_async( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list_version_aliases( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_version_aliases_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/mistral_jobs.py b/src/mistralai/client/mistral_jobs.py new file mode 100644 index 00000000..eae44033 --- /dev/null +++ b/src/mistralai/client/mistral_jobs.py @@ -0,0 +1,799 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + apiendpoint as models_apiendpoint, + batchjobstatus as models_batchjobstatus, + batchrequest as models_batchrequest, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class MistralJobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobsOut: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param agent_id: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
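+
+        Example (an illustrative sketch, not generated code; assumes the SDK exposes
+        this class as ``mistral.batch.jobs`` and that ``RUNNING`` is a valid
+        ``BatchJobStatus`` value):
+
+        ```python
+        jobs = mistral.batch.jobs.list(status=["RUNNING"], created_by_me=True)
+        for job in jobs.data or []:  # jobs is a models.BatchJobsOut
+            print(job.id, job.status)
+        ```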
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + agent_id=agent_id, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + ) + + req = self._build_request( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobsOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobsOut: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param agent_id: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.JobsAPIRoutesBatchGetBatchJobsRequest(
+            page=page,
+            page_size=page_size,
+            model=model,
+            agent_id=agent_id,
+            metadata=metadata,
+            created_after=created_after,
+            created_by_me=created_by_me,
+            status=status,
+        )
+
+        req = self._build_request_async(
+            method="GET",
+            path="/v1/batch/jobs",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="jobs_api_routes_batch_get_batch_jobs",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.BatchJobsOut, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
+    def create(
+        self,
+        *,
+        endpoint: models_apiendpoint.APIEndpoint,
+        input_files: OptionalNullable[List[str]] = UNSET,
+        requests: OptionalNullable[
+            Union[
+                List[models_batchrequest.BatchRequest],
+                List[models_batchrequest.BatchRequestTypedDict],
+            ]
+        ] = UNSET,
+        model: OptionalNullable[str] = UNSET,
+        agent_id: OptionalNullable[str] = UNSET,
+        metadata: OptionalNullable[Dict[str, str]] = UNSET,
+        timeout_hours: Optional[int] = 24,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.BatchJobOut:
+        r"""Create Batch Job
+
+        Create a new batch job; it will be queued for processing.
+
+        :param endpoint:
+        :param input_files: The list of input files to be used for batch inference. These files should be `jsonl` files, containing the input data corresponding to the request body for the batch inference in a \"body\" field. An example of such a file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```
+        :param requests:
+        :param model: The model to be used for batch inference.
+        :param agent_id: If you want to use a specific agent from the **deprecated** Agents API for batch inference, specify the agent ID here.
+        :param metadata: The metadata of your choice to be associated with the batch inference job.
+        :param timeout_hours: The timeout in hours for the batch inference job.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
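+
+        Example (an illustrative sketch, not generated code; assumes the SDK exposes
+        this class as ``mistral.batch.jobs``, that ``<file_id>`` is the ID of a
+        previously uploaded `jsonl` file, and that ``/v1/chat/completions`` is a
+        valid ``APIEndpoint`` value):
+
+        ```python
+        job = mistral.batch.jobs.create(
+            input_files=["<file_id>"],  # one or more uploaded batch files
+            endpoint="/v1/chat/completions",
+            model="mistral-small-latest",
+            metadata={"job_type": "testing"},
+        )
+        print(job.id, job.status)  # job is a models.BatchJobOut
+        ```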
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.BatchJobIn(
+            input_files=input_files,
+            requests=utils.get_pydantic_model(
+                requests, OptionalNullable[List[models.BatchRequest]]
+            ),
+            endpoint=endpoint,
+            model=model,
+            agent_id=agent_id,
+            metadata=metadata,
+            timeout_hours=timeout_hours,
+        )
+
+        req = self._build_request(
+            method="POST",
+            path="/v1/batch/jobs",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "json", models.BatchJobIn
+            ),
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="jobs_api_routes_batch_create_batch_job",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.BatchJobOut, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
+    async def create_async(
+        self,
+        *,
+        endpoint: models_apiendpoint.APIEndpoint,
+        input_files: OptionalNullable[List[str]] = UNSET,
+        requests: OptionalNullable[
+            Union[
+                List[models_batchrequest.BatchRequest],
+                List[models_batchrequest.BatchRequestTypedDict],
+            ]
+        ] = UNSET,
+        model: OptionalNullable[str] = UNSET,
+        agent_id: OptionalNullable[str] = UNSET,
+        metadata: OptionalNullable[Dict[str, str]] = UNSET,
+        timeout_hours: Optional[int] = 24,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.BatchJobOut:
+        r"""Create Batch Job
+
+        Create a new batch job; it will be queued for processing.
+
+        :param endpoint:
+        :param input_files: The list of input files to be used for batch inference. These files should be `jsonl` files, containing the input data corresponding to the request body for the batch inference in a \"body\" field. An example of such a file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```
+        :param requests:
+        :param model: The model to be used for batch inference.
+        :param agent_id: If you want to use a specific agent from the **deprecated** Agents API for batch inference, specify the agent ID here.
+        :param metadata: The metadata of your choice to be associated with the batch inference job.
+        :param timeout_hours: The timeout in hours for the batch inference job.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
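+
+        Example (an illustrative sketch of the async variant, not generated code;
+        same assumptions as the synchronous ``create`` example above):
+
+        ```python
+        import asyncio
+
+        async def main():
+            job = await mistral.batch.jobs.create_async(
+                input_files=["<file_id>"],
+                endpoint="/v1/chat/completions",
+                model="mistral-small-latest",
+            )
+            print(job.id)
+
+        asyncio.run(main())
+        ```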
r"""Create Batch Job + + Create a new batch job, it will be queued for processing. + + :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: + :param model: The model to be used for batch inference. + :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.BatchJobIn( + input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), + endpoint=endpoint, + model=model, + agent_id=agent_id, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self._build_request_async( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.BatchJobIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + job_id: str, + inline: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Get Batch Job + + Get a batch job details by its UUID. + + Args: + inline: If True, return results inline in the response. + + :param job_id: + :param inline: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + inline=inline, + ) + + req = self._build_request( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + job_id: str, + inline: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Get Batch Job + + Get a batch job details by its UUID. + + Args: + inline: If True, return results inline in the response. 
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.JobsAPIRoutesBatchGetBatchJobRequest(
+            job_id=job_id,
+            inline=inline,
+        )
+
+        req = self._build_request(
+            method="GET",
+            path="/v1/batch/jobs/{job_id}",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="jobs_api_routes_batch_get_batch_job",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.BatchJobOut, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = utils.stream_to_text(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
+    async def get_async(
+        self,
+        *,
+        job_id: str,
+        inline: OptionalNullable[bool] = UNSET,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.BatchJobOut:
+        r"""Get Batch Job
+
+        Get the details of a batch job by its UUID.
+
+        :param job_id:
+        :param inline: If True, return results inline in the response.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+        else:
+            base_url = self._get_url(base_url, url_variables)
+
+        request = models.JobsAPIRoutesBatchGetBatchJobRequest(
+            job_id=job_id,
+            inline=inline,
+        )
+
+        req = self._build_request_async(
+            method="GET",
+            path="/v1/batch/jobs/{job_id}",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=False,
+            request_has_path_params=True,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            http_headers=http_headers,
+            security=self.sdk_configuration.security,
+            allow_empty_value=None,
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                config=self.sdk_configuration,
+                base_url=base_url or "",
+                operation_id="jobs_api_routes_batch_get_batch_job",
+                oauth2_scopes=None,
+                security_source=get_security_from_env(
+                    self.sdk_configuration.security, models.Security
+                ),
+            ),
+            request=req,
+            error_status_codes=["4XX", "5XX"],
+            retry_config=retry_config,
+        )
+
+        if utils.match_response(http_res, "200", "application/json"):
+            return unmarshal_json_response(models.BatchJobOut, http_res)
+        if utils.match_response(http_res, "4XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+        if utils.match_response(http_res, "5XX", "*"):
+            http_res_text = await utils.stream_to_text_async(http_res)
+            raise models.SDKError("API error occurred", http_res, http_res_text)
+
+        raise models.SDKError("Unexpected response received", http_res)
+
+    def cancel(
+        self,
+        *,
+        job_id: str,
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
+    ) -> models.BatchJobOut:
+        r"""Cancel Batch Job
+
+        Request the cancellation of a batch job.
+
+        :param job_id:
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
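+
+        Example (an illustrative sketch, not generated code; assumes the SDK exposes
+        this class as ``mistral.batch.jobs``):
+
+        ```python
+        job = mistral.batch.jobs.cancel(job_id="<job_id>")
+        print(job.status)  # cancellation is requested; the job completes asynchronously
+        ```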
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def cancel_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py new file mode 100644 index 00000000..23e65222 --- /dev/null +++ b/src/mistralai/client/models/__init__.py @@ -0,0 +1,2531 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .mistralerror import MistralError +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys + +if TYPE_CHECKING: + from .agent import ( + Agent, + AgentObject, + AgentTools, + AgentToolsTypedDict, + AgentTypedDict, + ) + from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict + from .agentconversation import ( + AgentConversation, + AgentConversationAgentVersion, + AgentConversationAgentVersionTypedDict, + AgentConversationObject, + AgentConversationTypedDict, + ) + from .agentcreationrequest import ( + AgentCreationRequest, + AgentCreationRequestTools, + AgentCreationRequestToolsTypedDict, + AgentCreationRequestTypedDict, + ) + from .agenthandoffdoneevent import ( + AgentHandoffDoneEvent, + AgentHandoffDoneEventType, + AgentHandoffDoneEventTypedDict, + ) + from .agenthandoffentry import ( + AgentHandoffEntry, + AgentHandoffEntryObject, + AgentHandoffEntryType, + AgentHandoffEntryTypedDict, + ) + from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventType, + AgentHandoffStartedEventTypedDict, + ) + from .agents_api_v1_agents_create_or_update_aliasop import ( + AgentsAPIV1AgentsCreateOrUpdateAliasRequest, + AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, + ) + from .agents_api_v1_agents_deleteop import ( + AgentsAPIV1AgentsDeleteRequest, + AgentsAPIV1AgentsDeleteRequestTypedDict, + ) + from .agents_api_v1_agents_get_versionop import ( + AgentsAPIV1AgentsGetVersionRequest, + AgentsAPIV1AgentsGetVersionRequestTypedDict, + ) + from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, + QueryParamAgentVersion, + QueryParamAgentVersionTypedDict, + ) + from .agents_api_v1_agents_list_version_aliasesop import ( + AgentsAPIV1AgentsListVersionAliasesRequest, + AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, + ) + from .agents_api_v1_agents_list_versionsop import ( + AgentsAPIV1AgentsListVersionsRequest, + AgentsAPIV1AgentsListVersionsRequestTypedDict, + ) + from .agents_api_v1_agents_listop import ( + AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, + ) + from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, + ) + from .agents_api_v1_agents_updateop import ( + AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, + ) + from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, + ) + from .agents_api_v1_conversations_deleteop import ( + AgentsAPIV1ConversationsDeleteRequest, + AgentsAPIV1ConversationsDeleteRequestTypedDict, + ) + from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + AgentsAPIV1ConversationsGetRequestTypedDict, + AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, + ) + from .agents_api_v1_conversations_historyop import ( + AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, + ) + from .agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + ResponseBody, + ResponseBodyTypedDict, + ) + from 
.agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, + ) + from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, + ) + from .agentscompletionrequest import ( + AgentsCompletionRequest, + AgentsCompletionRequestMessages, + AgentsCompletionRequestMessagesTypedDict, + AgentsCompletionRequestStop, + AgentsCompletionRequestStopTypedDict, + AgentsCompletionRequestToolChoice, + AgentsCompletionRequestToolChoiceTypedDict, + AgentsCompletionRequestTypedDict, + ) + from .agentscompletionstreamrequest import ( + AgentsCompletionStreamRequest, + AgentsCompletionStreamRequestMessages, + AgentsCompletionStreamRequestMessagesTypedDict, + AgentsCompletionStreamRequestStop, + AgentsCompletionStreamRequestStopTypedDict, + AgentsCompletionStreamRequestToolChoice, + AgentsCompletionStreamRequestToolChoiceTypedDict, + AgentsCompletionStreamRequestTypedDict, + ) + from .agentupdaterequest import ( + AgentUpdateRequest, + AgentUpdateRequestTools, + AgentUpdateRequestToolsTypedDict, + AgentUpdateRequestTypedDict, + ) + from .apiendpoint import APIEndpoint + from .archiveftmodelout import ( + ArchiveFTModelOut, + ArchiveFTModelOutObject, + ArchiveFTModelOutTypedDict, + ) + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + AssistantMessageTypedDict, + ) + from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict + from .audioencoding import AudioEncoding + from .audioformat import AudioFormat, AudioFormatTypedDict + from .audiotranscriptionrequest import ( + AudioTranscriptionRequest, + AudioTranscriptionRequestTypedDict, + ) + from .audiotranscriptionrequeststream import ( + AudioTranscriptionRequestStream, + AudioTranscriptionRequestStreamTypedDict, + ) + from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict + from .batcherror import BatchError, BatchErrorTypedDict + from .batchjobin import BatchJobIn, BatchJobInTypedDict + from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict + from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict + from .batchjobstatus import BatchJobStatus + from .batchrequest import BatchRequest, BatchRequestTypedDict + from .builtinconnectors import BuiltInConnectors + from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceTypedDict, + FinishReason, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessages, + ChatCompletionStreamRequestMessagesTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + 
ChatCompletionStreamRequestTypedDict, + ) + from .chatmoderationrequest import ( + ChatModerationRequest, + ChatModerationRequestInputs, + ChatModerationRequestInputsTypedDict, + ChatModerationRequestTypedDict, + One, + OneTypedDict, + Two, + TwoTypedDict, + ) + from .checkpointout import CheckpointOut, CheckpointOutTypedDict + from .classificationrequest import ( + ClassificationRequest, + ClassificationRequestInputs, + ClassificationRequestInputsTypedDict, + ClassificationRequestTypedDict, + ) + from .classificationresponse import ( + ClassificationResponse, + ClassificationResponseTypedDict, + ) + from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, + ) + from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutIntegrations, + ClassifierDetailedJobOutIntegrationsTypedDict, + ClassifierDetailedJobOutJobType, + ClassifierDetailedJobOutObject, + ClassifierDetailedJobOutStatus, + ClassifierDetailedJobOutTypedDict, + ) + from .classifierftmodelout import ( + ClassifierFTModelOut, + ClassifierFTModelOutModelType, + ClassifierFTModelOutObject, + ClassifierFTModelOutTypedDict, + ) + from .classifierjobout import ( + ClassifierJobOut, + ClassifierJobOutIntegrations, + ClassifierJobOutIntegrationsTypedDict, + ClassifierJobOutJobType, + ClassifierJobOutObject, + ClassifierJobOutStatus, + ClassifierJobOutTypedDict, + ) + from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict + from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict + from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, + ) + from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, + ) + from .codeinterpretertool import ( + CodeInterpreterTool, + CodeInterpreterToolType, + CodeInterpreterToolTypedDict, + ) + from .completionargs import CompletionArgs, CompletionArgsTypedDict + from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutIntegrations, + CompletionDetailedJobOutIntegrationsTypedDict, + CompletionDetailedJobOutJobType, + CompletionDetailedJobOutObject, + CompletionDetailedJobOutRepositories, + CompletionDetailedJobOutRepositoriesTypedDict, + CompletionDetailedJobOutStatus, + CompletionDetailedJobOutTypedDict, + ) + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionftmodelout import ( + CompletionFTModelOut, + CompletionFTModelOutObject, + CompletionFTModelOutTypedDict, + ModelType, + ) + from .completionjobout import ( + CompletionJobOut, + CompletionJobOutObject, + CompletionJobOutTypedDict, + Integrations, + IntegrationsTypedDict, + JobType, + Repositories, + RepositoriesTypedDict, + Status, + ) + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, + ) + from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .conversationappendrequest import ( + 
ConversationAppendRequest, + ConversationAppendRequestHandoffExecution, + ConversationAppendRequestTypedDict, + ) + from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestHandoffExecution, + ConversationAppendStreamRequestTypedDict, + ) + from .conversationevents import ( + ConversationEvents, + ConversationEventsData, + ConversationEventsDataTypedDict, + ConversationEventsTypedDict, + ) + from .conversationhistory import ( + ConversationHistory, + ConversationHistoryObject, + ConversationHistoryTypedDict, + Entries, + EntriesTypedDict, + ) + from .conversationinputs import ConversationInputs, ConversationInputsTypedDict + from .conversationmessages import ( + ConversationMessages, + ConversationMessagesObject, + ConversationMessagesTypedDict, + ) + from .conversationrequest import ( + AgentVersion, + AgentVersionTypedDict, + ConversationRequest, + ConversationRequestTypedDict, + HandoffExecution, + Tools, + ToolsTypedDict, + ) + from .conversationresponse import ( + ConversationResponse, + ConversationResponseObject, + ConversationResponseTypedDict, + Outputs, + OutputsTypedDict, + ) + from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestAgentVersion, + ConversationRestartRequestAgentVersionTypedDict, + ConversationRestartRequestHandoffExecution, + ConversationRestartRequestTypedDict, + ) + from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestAgentVersion, + ConversationRestartStreamRequestAgentVersionTypedDict, + ConversationRestartStreamRequestHandoffExecution, + ConversationRestartStreamRequestTypedDict, + ) + from .conversationstreamrequest import ( + ConversationStreamRequest, + ConversationStreamRequestAgentVersion, + ConversationStreamRequestAgentVersionTypedDict, + ConversationStreamRequestHandoffExecution, + ConversationStreamRequestTools, + ConversationStreamRequestToolsTypedDict, + ConversationStreamRequestTypedDict, + ) + from .conversationusageinfo import ( + ConversationUsageInfo, + ConversationUsageInfoTypedDict, + ) + from .delete_model_v1_models_model_id_deleteop import ( + DeleteModelV1ModelsModelIDDeleteRequest, + DeleteModelV1ModelsModelIDDeleteRequestTypedDict, + ) + from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict + from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .documentlibrarytool import ( + DocumentLibraryTool, + DocumentLibraryToolType, + DocumentLibraryToolTypedDict, + ) + from .documentout import DocumentOut, DocumentOutTypedDict + from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict + from .documentupdatein import ( + Attributes, + AttributesTypedDict, + DocumentUpdateIn, + DocumentUpdateInTypedDict, + ) + from .documenturlchunk import ( + DocumentURLChunk, + DocumentURLChunkType, + DocumentURLChunkTypedDict, + ) + from .embeddingdtype import EmbeddingDtype + from .embeddingrequest import ( + EmbeddingRequest, + EmbeddingRequestInputs, + EmbeddingRequestInputsTypedDict, + EmbeddingRequestTypedDict, + ) + from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict + from .embeddingresponsedata import ( + EmbeddingResponseData, + EmbeddingResponseDataTypedDict, + ) + from .encodingformat import EncodingFormat + from .entitytype import EntityType + from .eventout import EventOut, EventOutTypedDict + 
from .file import File, FileTypedDict + from .filechunk import FileChunk, FileChunkTypedDict + from .filepurpose import FilePurpose + from .files_api_routes_delete_fileop import ( + FilesAPIRoutesDeleteFileRequest, + FilesAPIRoutesDeleteFileRequestTypedDict, + ) + from .files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + FilesAPIRoutesDownloadFileRequestTypedDict, + ) + from .files_api_routes_get_signed_urlop import ( + FilesAPIRoutesGetSignedURLRequest, + FilesAPIRoutesGetSignedURLRequestTypedDict, + ) + from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, + ) + from .files_api_routes_retrieve_fileop import ( + FilesAPIRoutesRetrieveFileRequest, + FilesAPIRoutesRetrieveFileRequestTypedDict, + ) + from .files_api_routes_upload_fileop import ( + FilesAPIRoutesUploadFileMultiPartBodyParams, + FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, + ) + from .fileschema import FileSchema, FileSchemaTypedDict + from .filesignedurl import FileSignedURL, FileSignedURLTypedDict + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .finetuneablemodeltype import FineTuneableModelType + from .ftclassifierlossfunction import FTClassifierLossFunction + from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, + ) + from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functioncallentry import ( + FunctionCallEntry, + FunctionCallEntryObject, + FunctionCallEntryType, + FunctionCallEntryTypedDict, + ) + from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, + ) + from .functioncallevent import ( + FunctionCallEvent, + FunctionCallEventType, + FunctionCallEventTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .functionresultentry import ( + FunctionResultEntry, + FunctionResultEntryObject, + FunctionResultEntryType, + FunctionResultEntryTypedDict, + ) + from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict + from .githubrepositoryin import ( + GithubRepositoryIn, + GithubRepositoryInType, + GithubRepositoryInTypedDict, + ) + from .githubrepositoryout import ( + GithubRepositoryOut, + GithubRepositoryOutType, + GithubRepositoryOutTypedDict, + ) + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagegenerationtool import ( + ImageGenerationTool, + ImageGenerationToolType, + ImageGenerationToolTypedDict, + ) + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .inputentries import InputEntries, InputEntriesTypedDict + from .inputs import ( + Inputs, + InputsTypedDict, + InstructRequestInputs, + InstructRequestInputsMessages, + 
InstructRequestInputsMessagesTypedDict, + InstructRequestInputsTypedDict, + ) + from .instructrequest import ( + InstructRequest, + InstructRequestMessages, + InstructRequestMessagesTypedDict, + InstructRequestTypedDict, + ) + from .jobin import ( + Hyperparameters, + HyperparametersTypedDict, + JobIn, + JobInIntegrations, + JobInIntegrationsTypedDict, + JobInRepositories, + JobInRepositoriesTypedDict, + JobInTypedDict, + ) + from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict + from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobop import ( + JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response1, + Response1TypedDict, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + QueryParamStatus, + ) + from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, + ) + from .jobsout import ( + JobsOut, + JobsOutData, + JobsOutDataTypedDict, + JobsOutObject, + JobsOutTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .legacyjobmetadataout import ( + LegacyJobMetadataOut, + LegacyJobMetadataOutObject, + LegacyJobMetadataOutTypedDict, + ) + from .libraries_delete_v1op import ( + LibrariesDeleteV1Request, + LibrariesDeleteV1RequestTypedDict, + ) + from .libraries_documents_delete_v1op import ( + LibrariesDocumentsDeleteV1Request, + LibrariesDocumentsDeleteV1RequestTypedDict, + ) + from 
.libraries_documents_get_extracted_text_signed_url_v1op import ( + LibrariesDocumentsGetExtractedTextSignedURLV1Request, + LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_signed_url_v1op import ( + LibrariesDocumentsGetSignedURLV1Request, + LibrariesDocumentsGetSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_status_v1op import ( + LibrariesDocumentsGetStatusV1Request, + LibrariesDocumentsGetStatusV1RequestTypedDict, + ) + from .libraries_documents_get_text_content_v1op import ( + LibrariesDocumentsGetTextContentV1Request, + LibrariesDocumentsGetTextContentV1RequestTypedDict, + ) + from .libraries_documents_get_v1op import ( + LibrariesDocumentsGetV1Request, + LibrariesDocumentsGetV1RequestTypedDict, + ) + from .libraries_documents_list_v1op import ( + LibrariesDocumentsListV1Request, + LibrariesDocumentsListV1RequestTypedDict, + ) + from .libraries_documents_reprocess_v1op import ( + LibrariesDocumentsReprocessV1Request, + LibrariesDocumentsReprocessV1RequestTypedDict, + ) + from .libraries_documents_update_v1op import ( + LibrariesDocumentsUpdateV1Request, + LibrariesDocumentsUpdateV1RequestTypedDict, + ) + from .libraries_documents_upload_v1op import ( + LibrariesDocumentsUploadV1DocumentUpload, + LibrariesDocumentsUploadV1DocumentUploadTypedDict, + LibrariesDocumentsUploadV1Request, + LibrariesDocumentsUploadV1RequestTypedDict, + ) + from .libraries_get_v1op import ( + LibrariesGetV1Request, + LibrariesGetV1RequestTypedDict, + ) + from .libraries_share_create_v1op import ( + LibrariesShareCreateV1Request, + LibrariesShareCreateV1RequestTypedDict, + ) + from .libraries_share_delete_v1op import ( + LibrariesShareDeleteV1Request, + LibrariesShareDeleteV1RequestTypedDict, + ) + from .libraries_share_list_v1op import ( + LibrariesShareListV1Request, + LibrariesShareListV1RequestTypedDict, + ) + from .libraries_update_v1op import ( + LibrariesUpdateV1Request, + LibrariesUpdateV1RequestTypedDict, + ) + from .libraryin import LibraryIn, LibraryInTypedDict + from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict + from .libraryout import LibraryOut, LibraryOutTypedDict + from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict + from .listfilesout import ListFilesOut, ListFilesOutTypedDict + from .listlibraryout import ListLibraryOut, ListLibraryOutTypedDict + from .listsharingout import ListSharingOut, ListSharingOutTypedDict + from .messageentries import MessageEntries, MessageEntriesTypedDict + from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, + ) + from .messageinputentry import ( + MessageInputEntry, + MessageInputEntryContent, + MessageInputEntryContentTypedDict, + MessageInputEntryRole, + MessageInputEntryType, + MessageInputEntryTypedDict, + Object, + ) + from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, + ) + from .messageoutputentry import ( + MessageOutputEntry, + MessageOutputEntryContent, + MessageOutputEntryContentTypedDict, + MessageOutputEntryObject, + MessageOutputEntryRole, + MessageOutputEntryType, + MessageOutputEntryTypedDict, + ) + from .messageoutputevent import ( + MessageOutputEvent, + MessageOutputEventContent, + MessageOutputEventContentTypedDict, + MessageOutputEventRole, + MessageOutputEventType, + MessageOutputEventTypedDict, + ) + from .metricout import MetricOut, MetricOutTypedDict + from .mistralpromptmode import MistralPromptMode + from 
.modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict + from .modelconversation import ( + ModelConversation, + ModelConversationObject, + ModelConversationTools, + ModelConversationToolsTypedDict, + ModelConversationTypedDict, + ) + from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict + from .moderationobject import ModerationObject, ModerationObjectTypedDict + from .moderationresponse import ModerationResponse, ModerationResponseTypedDict + from .no_response_error import NoResponseError + from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict + from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict + from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict + from .ocrrequest import ( + Document, + DocumentTypedDict, + OCRRequest, + OCRRequestTypedDict, + TableFormat, + ) + from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict + from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict + from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict + from .paginationinfo import PaginationInfo, PaginationInfoTypedDict + from .prediction import Prediction, PredictionTypedDict + from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .realtimetranscriptionerror import ( + RealtimeTranscriptionError, + RealtimeTranscriptionErrorTypedDict, + ) + from .realtimetranscriptionerrordetail import ( + Message, + MessageTypedDict, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, + ) + from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, + ) + from .realtimetranscriptionsessioncreated import ( + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionCreatedTypedDict, + ) + from .realtimetranscriptionsessionupdated import ( + RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdatedTypedDict, + ) + from .referencechunk import ( + ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .requestsource import RequestSource + from .responsedoneevent import ( + ResponseDoneEvent, + ResponseDoneEventType, + ResponseDoneEventTypedDict, + ) + from .responseerrorevent import ( + ResponseErrorEvent, + ResponseErrorEventType, + ResponseErrorEventTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .responsestartedevent import ( + ResponseStartedEvent, + ResponseStartedEventType, + ResponseStartedEventTypedDict, + ) + from .responsevalidationerror import ResponseValidationError + from .retrieve_model_v1_models_model_id_getop import ( + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, + ) + from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict + from .sampletype import SampleType + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .shareenum import ShareEnum + from .sharingdelete import SharingDelete, SharingDeleteTypedDict + from .sharingin import SharingIn, SharingInTypedDict + from .sharingout import SharingOut, SharingOutTypedDict + from .source import Source + from .ssetypes import SSETypes + from .systemmessage 
import ( + Role, + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) + from .timestampgranularity import TimestampGranularity + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventName, + ToolExecutionDeltaEventNameTypedDict, + ToolExecutionDeltaEventType, + ToolExecutionDeltaEventTypedDict, + ) + from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventName, + ToolExecutionDoneEventNameTypedDict, + ToolExecutionDoneEventType, + ToolExecutionDoneEventTypedDict, + ) + from .toolexecutionentry import ( + Name, + NameTypedDict, + ToolExecutionEntry, + ToolExecutionEntryObject, + ToolExecutionEntryType, + ToolExecutionEntryTypedDict, + ) + from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventName, + ToolExecutionStartedEventNameTypedDict, + ToolExecutionStartedEventType, + ToolExecutionStartedEventTypedDict, + ) + from .toolfilechunk import ( + ToolFileChunk, + ToolFileChunkTool, + ToolFileChunkToolTypedDict, + ToolFileChunkType, + ToolFileChunkTypedDict, + ) + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .toolreferencechunk import ( + ToolReferenceChunk, + ToolReferenceChunkTool, + ToolReferenceChunkToolTypedDict, + ToolReferenceChunkType, + ToolReferenceChunkTypedDict, + ) + from .tooltypes import ToolTypes + from .trainingfile import TrainingFile, TrainingFileTypedDict + from .transcriptionresponse import ( + TranscriptionResponse, + TranscriptionResponseTypedDict, + ) + from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, + Type, + ) + from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneType, + TranscriptionStreamDoneTypedDict, + ) + from .transcriptionstreamevents import ( + TranscriptionStreamEvents, + TranscriptionStreamEventsData, + TranscriptionStreamEventsDataTypedDict, + TranscriptionStreamEventsTypedDict, + ) + from .transcriptionstreameventtypes import TranscriptionStreamEventTypes + from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageType, + TranscriptionStreamLanguageTypedDict, + ) + from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaType, + TranscriptionStreamSegmentDeltaTypedDict, + ) + from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaType, + TranscriptionStreamTextDeltaTypedDict, + ) + from .unarchiveftmodelout import ( + UnarchiveFTModelOut, + UnarchiveFTModelOutObject, + UnarchiveFTModelOutTypedDict, + ) + from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict + from .uploadfileout import UploadFileOut, UploadFileOutTypedDict + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + 
UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + from .wandbintegration import ( + WandbIntegration, + WandbIntegrationType, + WandbIntegrationTypedDict, + ) + from .wandbintegrationout import ( + WandbIntegrationOut, + WandbIntegrationOutType, + WandbIntegrationOutTypedDict, + ) + from .websearchpremiumtool import ( + WebSearchPremiumTool, + WebSearchPremiumToolType, + WebSearchPremiumToolTypedDict, + ) + from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict + +__all__ = [ + "APIEndpoint", + "Agent", + "AgentAliasResponse", + "AgentAliasResponseTypedDict", + "AgentConversation", + "AgentConversationAgentVersion", + "AgentConversationAgentVersionTypedDict", + "AgentConversationObject", + "AgentConversationTypedDict", + "AgentCreationRequest", + "AgentCreationRequestTools", + "AgentCreationRequestToolsTypedDict", + "AgentCreationRequestTypedDict", + "AgentHandoffDoneEvent", + "AgentHandoffDoneEventType", + "AgentHandoffDoneEventTypedDict", + "AgentHandoffEntry", + "AgentHandoffEntryObject", + "AgentHandoffEntryType", + "AgentHandoffEntryTypedDict", + "AgentHandoffStartedEvent", + "AgentHandoffStartedEventType", + "AgentHandoffStartedEventTypedDict", + "AgentObject", + "AgentTools", + "AgentToolsTypedDict", + "AgentTypedDict", + "AgentUpdateRequest", + "AgentUpdateRequestTools", + "AgentUpdateRequestToolsTypedDict", + "AgentUpdateRequestTypedDict", + "AgentVersion", + "AgentVersionTypedDict", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteRequest", + "AgentsAPIV1AgentsDeleteRequestTypedDict", + "AgentsAPIV1AgentsGetRequest", + "AgentsAPIV1AgentsGetRequestTypedDict", + "AgentsAPIV1AgentsGetVersionRequest", + "AgentsAPIV1AgentsGetVersionRequestTypedDict", + "AgentsAPIV1AgentsListRequest", + "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionAliasesRequest", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", + "AgentsAPIV1AgentsListVersionsRequest", + "AgentsAPIV1AgentsListVersionsRequestTypedDict", + "AgentsAPIV1AgentsUpdateRequest", + "AgentsAPIV1AgentsUpdateRequestTypedDict", + "AgentsAPIV1AgentsUpdateVersionRequest", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", + "AgentsAPIV1ConversationsAppendRequest", + "AgentsAPIV1ConversationsAppendRequestTypedDict", + "AgentsAPIV1ConversationsAppendStreamRequest", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsDeleteRequest", + "AgentsAPIV1ConversationsDeleteRequestTypedDict", + "AgentsAPIV1ConversationsGetRequest", + "AgentsAPIV1ConversationsGetRequestTypedDict", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", + "AgentsAPIV1ConversationsHistoryRequest", + "AgentsAPIV1ConversationsHistoryRequestTypedDict", + "AgentsAPIV1ConversationsListRequest", + "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsMessagesRequest", + "AgentsAPIV1ConversationsMessagesRequestTypedDict", + "AgentsAPIV1ConversationsRestartRequest", + "AgentsAPIV1ConversationsRestartRequestTypedDict", + "AgentsAPIV1ConversationsRestartStreamRequest", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", + "AgentsCompletionRequest", + "AgentsCompletionRequestMessages", + "AgentsCompletionRequestMessagesTypedDict", + 
"AgentsCompletionRequestStop", + "AgentsCompletionRequestStopTypedDict", + "AgentsCompletionRequestToolChoice", + "AgentsCompletionRequestToolChoiceTypedDict", + "AgentsCompletionRequestTypedDict", + "AgentsCompletionStreamRequest", + "AgentsCompletionStreamRequestMessages", + "AgentsCompletionStreamRequestMessagesTypedDict", + "AgentsCompletionStreamRequestStop", + "AgentsCompletionStreamRequestStopTypedDict", + "AgentsCompletionStreamRequestToolChoice", + "AgentsCompletionStreamRequestToolChoiceTypedDict", + "AgentsCompletionStreamRequestTypedDict", + "ArchiveFTModelOut", + "ArchiveFTModelOutObject", + "ArchiveFTModelOutTypedDict", + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", + "AssistantMessageRole", + "AssistantMessageTypedDict", + "Attributes", + "AttributesTypedDict", + "AudioChunk", + "AudioChunkType", + "AudioChunkTypedDict", + "AudioEncoding", + "AudioFormat", + "AudioFormatTypedDict", + "AudioTranscriptionRequest", + "AudioTranscriptionRequestStream", + "AudioTranscriptionRequestStreamTypedDict", + "AudioTranscriptionRequestTypedDict", + "BaseModelCard", + "BaseModelCardType", + "BaseModelCardTypedDict", + "BatchError", + "BatchErrorTypedDict", + "BatchJobIn", + "BatchJobInTypedDict", + "BatchJobOut", + "BatchJobOutObject", + "BatchJobOutTypedDict", + "BatchJobStatus", + "BatchJobsOut", + "BatchJobsOutObject", + "BatchJobsOutTypedDict", + "BatchRequest", + "BatchRequestTypedDict", + "BuiltInConnectors", + "ChatClassificationRequest", + "ChatClassificationRequestTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessages", + "ChatCompletionStreamRequestMessagesTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "ChatModerationRequest", + "ChatModerationRequestInputs", + "ChatModerationRequestInputsTypedDict", + "ChatModerationRequestTypedDict", + "CheckpointOut", + "CheckpointOutTypedDict", + "ClassificationRequest", + "ClassificationRequestInputs", + "ClassificationRequestInputsTypedDict", + "ClassificationRequestTypedDict", + "ClassificationResponse", + "ClassificationResponseTypedDict", + "ClassificationTargetResult", + "ClassificationTargetResultTypedDict", + "ClassifierDetailedJobOut", + "ClassifierDetailedJobOutIntegrations", + "ClassifierDetailedJobOutIntegrationsTypedDict", + "ClassifierDetailedJobOutJobType", + "ClassifierDetailedJobOutObject", + "ClassifierDetailedJobOutStatus", + "ClassifierDetailedJobOutTypedDict", + "ClassifierFTModelOut", + "ClassifierFTModelOutModelType", + "ClassifierFTModelOutObject", + "ClassifierFTModelOutTypedDict", + "ClassifierJobOut", + "ClassifierJobOutIntegrations", + "ClassifierJobOutIntegrationsTypedDict", + "ClassifierJobOutJobType", + "ClassifierJobOutObject", + "ClassifierJobOutStatus", + "ClassifierJobOutTypedDict", + "ClassifierTargetIn", + "ClassifierTargetInTypedDict", + "ClassifierTargetOut", + "ClassifierTargetOutTypedDict", + "ClassifierTrainingParameters", + "ClassifierTrainingParametersIn", + "ClassifierTrainingParametersInTypedDict", + 
"ClassifierTrainingParametersTypedDict", + "CodeInterpreterTool", + "CodeInterpreterToolType", + "CodeInterpreterToolTypedDict", + "CompletionArgs", + "CompletionArgsStop", + "CompletionArgsStopTypedDict", + "CompletionArgsTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionDetailedJobOut", + "CompletionDetailedJobOutIntegrations", + "CompletionDetailedJobOutIntegrationsTypedDict", + "CompletionDetailedJobOutJobType", + "CompletionDetailedJobOutObject", + "CompletionDetailedJobOutRepositories", + "CompletionDetailedJobOutRepositoriesTypedDict", + "CompletionDetailedJobOutStatus", + "CompletionDetailedJobOutTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionFTModelOut", + "CompletionFTModelOutObject", + "CompletionFTModelOutTypedDict", + "CompletionJobOut", + "CompletionJobOutObject", + "CompletionJobOutTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "CompletionTrainingParameters", + "CompletionTrainingParametersIn", + "CompletionTrainingParametersInTypedDict", + "CompletionTrainingParametersTypedDict", + "Content", + "ContentChunk", + "ContentChunkTypedDict", + "ContentTypedDict", + "ConversationAppendRequest", + "ConversationAppendRequestHandoffExecution", + "ConversationAppendRequestTypedDict", + "ConversationAppendStreamRequest", + "ConversationAppendStreamRequestHandoffExecution", + "ConversationAppendStreamRequestTypedDict", + "ConversationEvents", + "ConversationEventsData", + "ConversationEventsDataTypedDict", + "ConversationEventsTypedDict", + "ConversationHistory", + "ConversationHistoryObject", + "ConversationHistoryTypedDict", + "ConversationInputs", + "ConversationInputsTypedDict", + "ConversationMessages", + "ConversationMessagesObject", + "ConversationMessagesTypedDict", + "ConversationRequest", + "ConversationRequestTypedDict", + "ConversationResponse", + "ConversationResponseObject", + "ConversationResponseTypedDict", + "ConversationRestartRequest", + "ConversationRestartRequestAgentVersion", + "ConversationRestartRequestAgentVersionTypedDict", + "ConversationRestartRequestHandoffExecution", + "ConversationRestartRequestTypedDict", + "ConversationRestartStreamRequest", + "ConversationRestartStreamRequestAgentVersion", + "ConversationRestartStreamRequestAgentVersionTypedDict", + "ConversationRestartStreamRequestHandoffExecution", + "ConversationRestartStreamRequestTypedDict", + "ConversationStreamRequest", + "ConversationStreamRequestAgentVersion", + "ConversationStreamRequestAgentVersionTypedDict", + "ConversationStreamRequestHandoffExecution", + "ConversationStreamRequestTools", + "ConversationStreamRequestToolsTypedDict", + "ConversationStreamRequestTypedDict", + "ConversationUsageInfo", + "ConversationUsageInfoTypedDict", + "Data", + "DataTypedDict", + "DeleteFileOut", + "DeleteFileOutTypedDict", + "DeleteModelOut", + "DeleteModelOutTypedDict", + "DeleteModelV1ModelsModelIDDeleteRequest", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", + "DeltaMessage", + "DeltaMessageTypedDict", + "Document", + "DocumentLibraryTool", + "DocumentLibraryToolType", + "DocumentLibraryToolTypedDict", + "DocumentOut", + "DocumentOutTypedDict", + "DocumentTextContent", + "DocumentTextContentTypedDict", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkType", + "DocumentURLChunkTypedDict", + "DocumentUpdateIn", + "DocumentUpdateInTypedDict", + "EmbeddingDtype", + "EmbeddingRequest", + "EmbeddingRequestInputs", + 
"EmbeddingRequestInputsTypedDict", + "EmbeddingRequestTypedDict", + "EmbeddingResponse", + "EmbeddingResponseData", + "EmbeddingResponseDataTypedDict", + "EmbeddingResponseTypedDict", + "EncodingFormat", + "EntityType", + "Entries", + "EntriesTypedDict", + "EventOut", + "EventOutTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "FTClassifierLossFunction", + "FTModelCapabilitiesOut", + "FTModelCapabilitiesOutTypedDict", + "FTModelCard", + "FTModelCardType", + "FTModelCardTypedDict", + "File", + "FileChunk", + "FileChunkTypedDict", + "FilePurpose", + "FileSchema", + "FileSchemaTypedDict", + "FileSignedURL", + "FileSignedURLTypedDict", + "FileTypedDict", + "FilesAPIRoutesDeleteFileRequest", + "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesDownloadFileRequest", + "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesGetSignedURLRequest", + "FilesAPIRoutesGetSignedURLRequestTypedDict", + "FilesAPIRoutesListFilesRequest", + "FilesAPIRoutesListFilesRequestTypedDict", + "FilesAPIRoutesRetrieveFileRequest", + "FilesAPIRoutesRetrieveFileRequestTypedDict", + "FilesAPIRoutesUploadFileMultiPartBodyParams", + "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", + "FineTuneableModelType", + "FinishReason", + "Format", + "Function", + "FunctionCall", + "FunctionCallEntry", + "FunctionCallEntryArguments", + "FunctionCallEntryArgumentsTypedDict", + "FunctionCallEntryObject", + "FunctionCallEntryType", + "FunctionCallEntryTypedDict", + "FunctionCallEvent", + "FunctionCallEventType", + "FunctionCallEventTypedDict", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionResultEntry", + "FunctionResultEntryObject", + "FunctionResultEntryType", + "FunctionResultEntryTypedDict", + "FunctionTool", + "FunctionToolType", + "FunctionToolTypedDict", + "FunctionTypedDict", + "GithubRepositoryIn", + "GithubRepositoryInType", + "GithubRepositoryInTypedDict", + "GithubRepositoryOut", + "GithubRepositoryOutType", + "GithubRepositoryOutTypedDict", + "HTTPValidationError", + "HTTPValidationErrorData", + "HandoffExecution", + "Hyperparameters", + "HyperparametersTypedDict", + "ImageGenerationTool", + "ImageGenerationToolType", + "ImageGenerationToolTypedDict", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkImageURL", + "ImageURLChunkImageURLTypedDict", + "ImageURLChunkType", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "InputEntries", + "InputEntriesTypedDict", + "Inputs", + "InputsTypedDict", + "InstructRequest", + "InstructRequestInputs", + "InstructRequestInputsMessages", + "InstructRequestInputsMessagesTypedDict", + "InstructRequestInputsTypedDict", + "InstructRequestMessages", + "InstructRequestMessagesTypedDict", + "InstructRequestTypedDict", + "Integrations", + "IntegrationsTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", + "JobIn", + "JobInIntegrations", + "JobInIntegrationsTypedDict", + "JobInRepositories", + "JobInRepositoriesTypedDict", + "JobInTypedDict", + "JobMetadataOut", + "JobMetadataOutTypedDict", + "JobType", + "JobsAPIRoutesBatchCancelBatchJobRequest", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobRequest", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", + 
"JobsAPIRoutesBatchGetBatchJobsRequest", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + "JobsOut", + "JobsOutData", + "JobsOutDataTypedDict", + "JobsOutObject", + "JobsOutTypedDict", + "LegacyJobMetadataOut", + "LegacyJobMetadataOutObject", + "LegacyJobMetadataOutTypedDict", + "LibrariesDeleteV1Request", + "LibrariesDeleteV1RequestTypedDict", + "LibrariesDocumentsDeleteV1Request", + "LibrariesDocumentsDeleteV1RequestTypedDict", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetSignedURLV1Request", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetStatusV1Request", + "LibrariesDocumentsGetStatusV1RequestTypedDict", + "LibrariesDocumentsGetTextContentV1Request", + "LibrariesDocumentsGetTextContentV1RequestTypedDict", + "LibrariesDocumentsGetV1Request", + "LibrariesDocumentsGetV1RequestTypedDict", + "LibrariesDocumentsListV1Request", + "LibrariesDocumentsListV1RequestTypedDict", + "LibrariesDocumentsReprocessV1Request", + "LibrariesDocumentsReprocessV1RequestTypedDict", + "LibrariesDocumentsUpdateV1Request", + "LibrariesDocumentsUpdateV1RequestTypedDict", + "LibrariesDocumentsUploadV1DocumentUpload", + "LibrariesDocumentsUploadV1DocumentUploadTypedDict", + "LibrariesDocumentsUploadV1Request", + "LibrariesDocumentsUploadV1RequestTypedDict", + "LibrariesGetV1Request", + "LibrariesGetV1RequestTypedDict", + "LibrariesShareCreateV1Request", + "LibrariesShareCreateV1RequestTypedDict", + "LibrariesShareDeleteV1Request", + "LibrariesShareDeleteV1RequestTypedDict", + "LibrariesShareListV1Request", + "LibrariesShareListV1RequestTypedDict", + "LibrariesUpdateV1Request", + "LibrariesUpdateV1RequestTypedDict", + "LibraryIn", + "LibraryInTypedDict", + "LibraryInUpdate", + "LibraryInUpdateTypedDict", + "LibraryOut", + "LibraryOutTypedDict", + "ListDocumentOut", + "ListDocumentOutTypedDict", + "ListFilesOut", + "ListFilesOutTypedDict", + "ListLibraryOut", + "ListLibraryOutTypedDict", + "ListSharingOut", + "ListSharingOutTypedDict", + "Loc", + "LocTypedDict", + "Message", 
+ "MessageEntries", + "MessageEntriesTypedDict", + "MessageInputContentChunks", + "MessageInputContentChunksTypedDict", + "MessageInputEntry", + "MessageInputEntryContent", + "MessageInputEntryContentTypedDict", + "MessageInputEntryRole", + "MessageInputEntryType", + "MessageInputEntryTypedDict", + "MessageOutputContentChunks", + "MessageOutputContentChunksTypedDict", + "MessageOutputEntry", + "MessageOutputEntryContent", + "MessageOutputEntryContentTypedDict", + "MessageOutputEntryObject", + "MessageOutputEntryRole", + "MessageOutputEntryType", + "MessageOutputEntryTypedDict", + "MessageOutputEvent", + "MessageOutputEventContent", + "MessageOutputEventContentTypedDict", + "MessageOutputEventRole", + "MessageOutputEventType", + "MessageOutputEventTypedDict", + "MessageTypedDict", + "Messages", + "MessagesTypedDict", + "MetricOut", + "MetricOutTypedDict", + "MistralError", + "MistralPromptMode", + "ModelCapabilities", + "ModelCapabilitiesTypedDict", + "ModelConversation", + "ModelConversationObject", + "ModelConversationTools", + "ModelConversationToolsTypedDict", + "ModelConversationTypedDict", + "ModelList", + "ModelListTypedDict", + "ModelType", + "ModerationObject", + "ModerationObjectTypedDict", + "ModerationResponse", + "ModerationResponseTypedDict", + "Name", + "NameTypedDict", + "NoResponseError", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", + "Object", + "One", + "OneTypedDict", + "OutputContentChunks", + "OutputContentChunksTypedDict", + "Outputs", + "OutputsTypedDict", + "PaginationInfo", + "PaginationInfoTypedDict", + "Prediction", + "PredictionTypedDict", + "ProcessingStatusOut", + "ProcessingStatusOutTypedDict", + "QueryParamAgentVersion", + "QueryParamAgentVersionTypedDict", + "QueryParamStatus", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionErrorDetailTypedDict", + "RealtimeTranscriptionErrorTypedDict", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionCreatedTypedDict", + "RealtimeTranscriptionSessionTypedDict", + "RealtimeTranscriptionSessionUpdated", + "RealtimeTranscriptionSessionUpdatedTypedDict", + "ReferenceChunk", + "ReferenceChunkType", + "ReferenceChunkTypedDict", + "Repositories", + "RepositoriesTypedDict", + "RequestSource", + "Response1", + "Response1TypedDict", + "ResponseBody", + "ResponseBodyTypedDict", + "ResponseDoneEvent", + "ResponseDoneEventType", + "ResponseDoneEventTypedDict", + "ResponseErrorEvent", + "ResponseErrorEventType", + "ResponseErrorEventTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "ResponseStartedEvent", + "ResponseStartedEventType", + "ResponseStartedEventTypedDict", + "ResponseValidationError", + "RetrieveFileOut", + "RetrieveFileOutTypedDict", + "RetrieveModelV1ModelsModelIDGetRequest", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", + "Role", + "SDKError", + "SSETypes", + "SampleType", + "Security", + "SecurityTypedDict", + "ShareEnum", + "SharingDelete", + "SharingDeleteTypedDict", + "SharingIn", + "SharingInTypedDict", + "SharingOut", + 
"SharingOutTypedDict", + "Source", + "Status", + "Stop", + "StopTypedDict", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", + "SystemMessageContentTypedDict", + "SystemMessageTypedDict", + "TableFormat", + "TextChunk", + "TextChunkType", + "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", + "TimestampGranularity", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolExecutionDeltaEvent", + "ToolExecutionDeltaEventName", + "ToolExecutionDeltaEventNameTypedDict", + "ToolExecutionDeltaEventType", + "ToolExecutionDeltaEventTypedDict", + "ToolExecutionDoneEvent", + "ToolExecutionDoneEventName", + "ToolExecutionDoneEventNameTypedDict", + "ToolExecutionDoneEventType", + "ToolExecutionDoneEventTypedDict", + "ToolExecutionEntry", + "ToolExecutionEntryObject", + "ToolExecutionEntryType", + "ToolExecutionEntryTypedDict", + "ToolExecutionStartedEvent", + "ToolExecutionStartedEventName", + "ToolExecutionStartedEventNameTypedDict", + "ToolExecutionStartedEventType", + "ToolExecutionStartedEventTypedDict", + "ToolFileChunk", + "ToolFileChunkTool", + "ToolFileChunkToolTypedDict", + "ToolFileChunkType", + "ToolFileChunkTypedDict", + "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", + "ToolMessageRole", + "ToolMessageTypedDict", + "ToolReferenceChunk", + "ToolReferenceChunkTool", + "ToolReferenceChunkToolTypedDict", + "ToolReferenceChunkType", + "ToolReferenceChunkTypedDict", + "ToolTypedDict", + "ToolTypes", + "Tools", + "ToolsTypedDict", + "TrainingFile", + "TrainingFileTypedDict", + "TranscriptionResponse", + "TranscriptionResponseTypedDict", + "TranscriptionSegmentChunk", + "TranscriptionSegmentChunkTypedDict", + "TranscriptionStreamDone", + "TranscriptionStreamDoneType", + "TranscriptionStreamDoneTypedDict", + "TranscriptionStreamEventTypes", + "TranscriptionStreamEvents", + "TranscriptionStreamEventsData", + "TranscriptionStreamEventsDataTypedDict", + "TranscriptionStreamEventsTypedDict", + "TranscriptionStreamLanguage", + "TranscriptionStreamLanguageType", + "TranscriptionStreamLanguageTypedDict", + "TranscriptionStreamSegmentDelta", + "TranscriptionStreamSegmentDeltaType", + "TranscriptionStreamSegmentDeltaTypedDict", + "TranscriptionStreamTextDelta", + "TranscriptionStreamTextDeltaType", + "TranscriptionStreamTextDeltaTypedDict", + "Two", + "TwoTypedDict", + "Type", + "UnarchiveFTModelOut", + "UnarchiveFTModelOutObject", + "UnarchiveFTModelOutTypedDict", + "UpdateFTModelIn", + "UpdateFTModelInTypedDict", + "UploadFileOut", + "UploadFileOutTypedDict", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageRole", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", + "WandbIntegration", + "WandbIntegrationOut", + "WandbIntegrationOutType", + "WandbIntegrationOutTypedDict", + "WandbIntegrationType", + "WandbIntegrationTypedDict", + "WebSearchPremiumTool", + "WebSearchPremiumToolType", + "WebSearchPremiumToolTypedDict", + "WebSearchTool", + "WebSearchToolType", + "WebSearchToolTypedDict", +] + +_dynamic_imports: dict[str, str] = { + "Agent": ".agent", + "AgentObject": ".agent", + "AgentTools": ".agent", + "AgentToolsTypedDict": ".agent", + "AgentTypedDict": ".agent", + "AgentAliasResponse": ".agentaliasresponse", + "AgentAliasResponseTypedDict": ".agentaliasresponse", + 
"AgentConversation": ".agentconversation", + "AgentConversationAgentVersion": ".agentconversation", + "AgentConversationAgentVersionTypedDict": ".agentconversation", + "AgentConversationObject": ".agentconversation", + "AgentConversationTypedDict": ".agentconversation", + "AgentCreationRequest": ".agentcreationrequest", + "AgentCreationRequestTools": ".agentcreationrequest", + "AgentCreationRequestToolsTypedDict": ".agentcreationrequest", + "AgentCreationRequestTypedDict": ".agentcreationrequest", + "AgentHandoffDoneEvent": ".agenthandoffdoneevent", + "AgentHandoffDoneEventType": ".agenthandoffdoneevent", + "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", + "AgentHandoffEntry": ".agenthandoffentry", + "AgentHandoffEntryObject": ".agenthandoffentry", + "AgentHandoffEntryType": ".agenthandoffentry", + "AgentHandoffEntryTypedDict": ".agenthandoffentry", + "AgentHandoffStartedEvent": ".agenthandoffstartedevent", + "AgentHandoffStartedEventType": ".agenthandoffstartedevent", + "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "QueryParamAgentVersion": ".agents_api_v1_agents_getop", + "QueryParamAgentVersionTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", + "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", + "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + 
"AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", + "ResponseBody": ".agents_api_v1_conversations_listop", + "ResponseBodyTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", + "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", + "AgentsCompletionRequest": ".agentscompletionrequest", + "AgentsCompletionRequestMessages": ".agentscompletionrequest", + "AgentsCompletionRequestMessagesTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestStop": ".agentscompletionrequest", + "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", + "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessages": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessagesTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", + "AgentUpdateRequest": ".agentupdaterequest", + "AgentUpdateRequestTools": ".agentupdaterequest", + "AgentUpdateRequestToolsTypedDict": ".agentupdaterequest", + "AgentUpdateRequestTypedDict": ".agentupdaterequest", + "APIEndpoint": ".apiendpoint", + "ArchiveFTModelOut": ".archiveftmodelout", + "ArchiveFTModelOutObject": ".archiveftmodelout", + "ArchiveFTModelOutTypedDict": ".archiveftmodelout", + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "AudioChunk": ".audiochunk", + "AudioChunkType": ".audiochunk", + "AudioChunkTypedDict": ".audiochunk", + "AudioEncoding": ".audioencoding", + "AudioFormat": ".audioformat", + "AudioFormatTypedDict": ".audioformat", + "AudioTranscriptionRequest": ".audiotranscriptionrequest", + "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", + "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", + "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", + "BaseModelCard": ".basemodelcard", + "BaseModelCardType": ".basemodelcard", + 
"BaseModelCardTypedDict": ".basemodelcard", + "BatchError": ".batcherror", + "BatchErrorTypedDict": ".batcherror", + "BatchJobIn": ".batchjobin", + "BatchJobInTypedDict": ".batchjobin", + "BatchJobOut": ".batchjobout", + "BatchJobOutObject": ".batchjobout", + "BatchJobOutTypedDict": ".batchjobout", + "BatchJobsOut": ".batchjobsout", + "BatchJobsOutObject": ".batchjobsout", + "BatchJobsOutTypedDict": ".batchjobsout", + "BatchJobStatus": ".batchjobstatus", + "BatchRequest": ".batchrequest", + "BatchRequestTypedDict": ".batchrequest", + "BuiltInConnectors": ".builtinconnectors", + "ChatClassificationRequest": ".chatclassificationrequest", + "ChatClassificationRequestTypedDict": ".chatclassificationrequest", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "FinishReason": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "Messages": ".chatcompletionrequest", + "MessagesTypedDict": ".chatcompletionrequest", + "Stop": ".chatcompletionrequest", + "StopTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessages": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessagesTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "ChatModerationRequest": ".chatmoderationrequest", + "ChatModerationRequestInputs": ".chatmoderationrequest", + "ChatModerationRequestInputsTypedDict": ".chatmoderationrequest", + "ChatModerationRequestTypedDict": ".chatmoderationrequest", + "One": ".chatmoderationrequest", + "OneTypedDict": ".chatmoderationrequest", + "Two": ".chatmoderationrequest", + "TwoTypedDict": ".chatmoderationrequest", + "CheckpointOut": ".checkpointout", + "CheckpointOutTypedDict": ".checkpointout", + "ClassificationRequest": ".classificationrequest", + "ClassificationRequestInputs": ".classificationrequest", + "ClassificationRequestInputsTypedDict": ".classificationrequest", + "ClassificationRequestTypedDict": ".classificationrequest", + "ClassificationResponse": ".classificationresponse", + "ClassificationResponseTypedDict": ".classificationresponse", + "ClassificationTargetResult": ".classificationtargetresult", + "ClassificationTargetResultTypedDict": ".classificationtargetresult", + "ClassifierDetailedJobOut": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegrations": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegrationsTypedDict": ".classifierdetailedjobout", + "ClassifierDetailedJobOutJobType": ".classifierdetailedjobout", + "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", + "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", + "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", + "ClassifierFTModelOut": ".classifierftmodelout", + "ClassifierFTModelOutModelType": 
".classifierftmodelout", + "ClassifierFTModelOutObject": ".classifierftmodelout", + "ClassifierFTModelOutTypedDict": ".classifierftmodelout", + "ClassifierJobOut": ".classifierjobout", + "ClassifierJobOutIntegrations": ".classifierjobout", + "ClassifierJobOutIntegrationsTypedDict": ".classifierjobout", + "ClassifierJobOutJobType": ".classifierjobout", + "ClassifierJobOutObject": ".classifierjobout", + "ClassifierJobOutStatus": ".classifierjobout", + "ClassifierJobOutTypedDict": ".classifierjobout", + "ClassifierTargetIn": ".classifiertargetin", + "ClassifierTargetInTypedDict": ".classifiertargetin", + "ClassifierTargetOut": ".classifiertargetout", + "ClassifierTargetOutTypedDict": ".classifiertargetout", + "ClassifierTrainingParameters": ".classifiertrainingparameters", + "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", + "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", + "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", + "CodeInterpreterTool": ".codeinterpretertool", + "CodeInterpreterToolType": ".codeinterpretertool", + "CodeInterpreterToolTypedDict": ".codeinterpretertool", + "CompletionArgs": ".completionargs", + "CompletionArgsTypedDict": ".completionargs", + "CompletionArgsStop": ".completionargsstop", + "CompletionArgsStopTypedDict": ".completionargsstop", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionDetailedJobOut": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegrations": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegrationsTypedDict": ".completiondetailedjobout", + "CompletionDetailedJobOutJobType": ".completiondetailedjobout", + "CompletionDetailedJobOutObject": ".completiondetailedjobout", + "CompletionDetailedJobOutRepositories": ".completiondetailedjobout", + "CompletionDetailedJobOutRepositoriesTypedDict": ".completiondetailedjobout", + "CompletionDetailedJobOutStatus": ".completiondetailedjobout", + "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionFTModelOut": ".completionftmodelout", + "CompletionFTModelOutObject": ".completionftmodelout", + "CompletionFTModelOutTypedDict": ".completionftmodelout", + "ModelType": ".completionftmodelout", + "CompletionJobOut": ".completionjobout", + "CompletionJobOutObject": ".completionjobout", + "CompletionJobOutTypedDict": ".completionjobout", + "Integrations": ".completionjobout", + "IntegrationsTypedDict": ".completionjobout", + "JobType": ".completionjobout", + "Repositories": ".completionjobout", + "RepositoriesTypedDict": ".completionjobout", + "Status": ".completionjobout", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "CompletionTrainingParameters": ".completiontrainingparameters", + "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", + "CompletionTrainingParametersIn": ".completiontrainingparametersin", + "CompletionTrainingParametersInTypedDict": ".completiontrainingparametersin", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "ConversationAppendRequest": ".conversationappendrequest", + "ConversationAppendRequestHandoffExecution": ".conversationappendrequest", + "ConversationAppendRequestTypedDict": 
".conversationappendrequest", + "ConversationAppendStreamRequest": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestHandoffExecution": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestTypedDict": ".conversationappendstreamrequest", + "ConversationEvents": ".conversationevents", + "ConversationEventsData": ".conversationevents", + "ConversationEventsDataTypedDict": ".conversationevents", + "ConversationEventsTypedDict": ".conversationevents", + "ConversationHistory": ".conversationhistory", + "ConversationHistoryObject": ".conversationhistory", + "ConversationHistoryTypedDict": ".conversationhistory", + "Entries": ".conversationhistory", + "EntriesTypedDict": ".conversationhistory", + "ConversationInputs": ".conversationinputs", + "ConversationInputsTypedDict": ".conversationinputs", + "ConversationMessages": ".conversationmessages", + "ConversationMessagesObject": ".conversationmessages", + "ConversationMessagesTypedDict": ".conversationmessages", + "AgentVersion": ".conversationrequest", + "AgentVersionTypedDict": ".conversationrequest", + "ConversationRequest": ".conversationrequest", + "ConversationRequestTypedDict": ".conversationrequest", + "HandoffExecution": ".conversationrequest", + "Tools": ".conversationrequest", + "ToolsTypedDict": ".conversationrequest", + "ConversationResponse": ".conversationresponse", + "ConversationResponseObject": ".conversationresponse", + "ConversationResponseTypedDict": ".conversationresponse", + "Outputs": ".conversationresponse", + "OutputsTypedDict": ".conversationresponse", + "ConversationRestartRequest": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", + "ConversationRestartRequestHandoffExecution": ".conversationrestartrequest", + "ConversationRestartRequestTypedDict": ".conversationrestartrequest", + "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersion": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", + "ConversationStreamRequest": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", + "ConversationStreamRequestTools": ".conversationstreamrequest", + "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestTypedDict": ".conversationstreamrequest", + "ConversationUsageInfo": ".conversationusageinfo", + "ConversationUsageInfoTypedDict": ".conversationusageinfo", + "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "DeleteFileOut": ".deletefileout", + "DeleteFileOutTypedDict": ".deletefileout", + "DeleteModelOut": ".deletemodelout", + "DeleteModelOutTypedDict": ".deletemodelout", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "DocumentLibraryTool": 
".documentlibrarytool", + "DocumentLibraryToolType": ".documentlibrarytool", + "DocumentLibraryToolTypedDict": ".documentlibrarytool", + "DocumentOut": ".documentout", + "DocumentOutTypedDict": ".documentout", + "DocumentTextContent": ".documenttextcontent", + "DocumentTextContentTypedDict": ".documenttextcontent", + "Attributes": ".documentupdatein", + "AttributesTypedDict": ".documentupdatein", + "DocumentUpdateIn": ".documentupdatein", + "DocumentUpdateInTypedDict": ".documentupdatein", + "DocumentURLChunk": ".documenturlchunk", + "DocumentURLChunkType": ".documenturlchunk", + "DocumentURLChunkTypedDict": ".documenturlchunk", + "EmbeddingDtype": ".embeddingdtype", + "EmbeddingRequest": ".embeddingrequest", + "EmbeddingRequestInputs": ".embeddingrequest", + "EmbeddingRequestInputsTypedDict": ".embeddingrequest", + "EmbeddingRequestTypedDict": ".embeddingrequest", + "EmbeddingResponse": ".embeddingresponse", + "EmbeddingResponseTypedDict": ".embeddingresponse", + "EmbeddingResponseData": ".embeddingresponsedata", + "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", + "EncodingFormat": ".encodingformat", + "EntityType": ".entitytype", + "EventOut": ".eventout", + "EventOutTypedDict": ".eventout", + "File": ".file", + "FileTypedDict": ".file", + "FileChunk": ".filechunk", + "FileChunkTypedDict": ".filechunk", + "FilePurpose": ".filepurpose", + "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", + "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", + "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", + "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", + "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", + "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", + "FileSchema": ".fileschema", + "FileSchemaTypedDict": ".fileschema", + "FileSignedURL": ".filesignedurl", + "FileSignedURLTypedDict": ".filesignedurl", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "FineTuneableModelType": ".finetuneablemodeltype", + "FTClassifierLossFunction": ".ftclassifierlossfunction", + "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", + "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", + "FTModelCard": ".ftmodelcard", + "FTModelCardType": ".ftmodelcard", + "FTModelCardTypedDict": ".ftmodelcard", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + 
"ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionCallEntry": ".functioncallentry", + "FunctionCallEntryObject": ".functioncallentry", + "FunctionCallEntryType": ".functioncallentry", + "FunctionCallEntryTypedDict": ".functioncallentry", + "FunctionCallEntryArguments": ".functioncallentryarguments", + "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", + "FunctionCallEvent": ".functioncallevent", + "FunctionCallEventType": ".functioncallevent", + "FunctionCallEventTypedDict": ".functioncallevent", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "FunctionResultEntry": ".functionresultentry", + "FunctionResultEntryObject": ".functionresultentry", + "FunctionResultEntryType": ".functionresultentry", + "FunctionResultEntryTypedDict": ".functionresultentry", + "FunctionTool": ".functiontool", + "FunctionToolType": ".functiontool", + "FunctionToolTypedDict": ".functiontool", + "GithubRepositoryIn": ".githubrepositoryin", + "GithubRepositoryInType": ".githubrepositoryin", + "GithubRepositoryInTypedDict": ".githubrepositoryin", + "GithubRepositoryOut": ".githubrepositoryout", + "GithubRepositoryOutType": ".githubrepositoryout", + "GithubRepositoryOutTypedDict": ".githubrepositoryout", + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageGenerationTool": ".imagegenerationtool", + "ImageGenerationToolType": ".imagegenerationtool", + "ImageGenerationToolTypedDict": ".imagegenerationtool", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "InputEntries": ".inputentries", + "InputEntriesTypedDict": ".inputentries", + "Inputs": ".inputs", + "InputsTypedDict": ".inputs", + "InstructRequestInputs": ".inputs", + "InstructRequestInputsMessages": ".inputs", + "InstructRequestInputsMessagesTypedDict": ".inputs", + "InstructRequestInputsTypedDict": ".inputs", + "InstructRequest": ".instructrequest", + "InstructRequestMessages": ".instructrequest", + "InstructRequestMessagesTypedDict": ".instructrequest", + "InstructRequestTypedDict": ".instructrequest", + "Hyperparameters": ".jobin", + "HyperparametersTypedDict": ".jobin", + "JobIn": ".jobin", + "JobInIntegrations": ".jobin", + "JobInIntegrationsTypedDict": ".jobin", + "JobInRepositories": ".jobin", + "JobInRepositoriesTypedDict": ".jobin", + "JobInTypedDict": ".jobin", + "JobMetadataOut": ".jobmetadataout", + "JobMetadataOutTypedDict": ".jobmetadataout", + "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + 
"JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response1": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response1TypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "QueryParamStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsOut": ".jobsout", + "JobsOutData": ".jobsout", + "JobsOutDataTypedDict": ".jobsout", + "JobsOutObject": ".jobsout", + "JobsOutTypedDict": ".jobsout", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "LegacyJobMetadataOut": ".legacyjobmetadataout", + "LegacyJobMetadataOutObject": ".legacyjobmetadataout", + "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", + "LibrariesDeleteV1Request": ".libraries_delete_v1op", + "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", + "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", + "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request": 
".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", + "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", + "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", + "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", + "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", + "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", + "LibrariesDocumentsUploadV1DocumentUpload": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", + "LibrariesGetV1Request": ".libraries_get_v1op", + "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", + "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", + "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", + "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", + "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", + "LibrariesShareListV1Request": ".libraries_share_list_v1op", + "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", + "LibrariesUpdateV1Request": ".libraries_update_v1op", + "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", + "LibraryIn": ".libraryin", + "LibraryInTypedDict": ".libraryin", + "LibraryInUpdate": ".libraryinupdate", + "LibraryInUpdateTypedDict": ".libraryinupdate", + "LibraryOut": ".libraryout", + "LibraryOutTypedDict": ".libraryout", + "ListDocumentOut": ".listdocumentout", + "ListDocumentOutTypedDict": ".listdocumentout", + "ListFilesOut": ".listfilesout", + "ListFilesOutTypedDict": ".listfilesout", + "ListLibraryOut": ".listlibraryout", + "ListLibraryOutTypedDict": ".listlibraryout", + "ListSharingOut": ".listsharingout", + "ListSharingOutTypedDict": ".listsharingout", + "MessageEntries": ".messageentries", + "MessageEntriesTypedDict": ".messageentries", + "MessageInputContentChunks": ".messageinputcontentchunks", + "MessageInputContentChunksTypedDict": ".messageinputcontentchunks", + "MessageInputEntry": ".messageinputentry", + "MessageInputEntryContent": ".messageinputentry", + "MessageInputEntryContentTypedDict": ".messageinputentry", + "MessageInputEntryRole": ".messageinputentry", + "MessageInputEntryType": ".messageinputentry", + "MessageInputEntryTypedDict": ".messageinputentry", + "Object": ".messageinputentry", + "MessageOutputContentChunks": ".messageoutputcontentchunks", + 
"MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", + "MessageOutputEntry": ".messageoutputentry", + "MessageOutputEntryContent": ".messageoutputentry", + "MessageOutputEntryContentTypedDict": ".messageoutputentry", + "MessageOutputEntryObject": ".messageoutputentry", + "MessageOutputEntryRole": ".messageoutputentry", + "MessageOutputEntryType": ".messageoutputentry", + "MessageOutputEntryTypedDict": ".messageoutputentry", + "MessageOutputEvent": ".messageoutputevent", + "MessageOutputEventContent": ".messageoutputevent", + "MessageOutputEventContentTypedDict": ".messageoutputevent", + "MessageOutputEventRole": ".messageoutputevent", + "MessageOutputEventType": ".messageoutputevent", + "MessageOutputEventTypedDict": ".messageoutputevent", + "MetricOut": ".metricout", + "MetricOutTypedDict": ".metricout", + "MistralPromptMode": ".mistralpromptmode", + "ModelCapabilities": ".modelcapabilities", + "ModelCapabilitiesTypedDict": ".modelcapabilities", + "ModelConversation": ".modelconversation", + "ModelConversationObject": ".modelconversation", + "ModelConversationTools": ".modelconversation", + "ModelConversationToolsTypedDict": ".modelconversation", + "ModelConversationTypedDict": ".modelconversation", + "Data": ".modellist", + "DataTypedDict": ".modellist", + "ModelList": ".modellist", + "ModelListTypedDict": ".modellist", + "ModerationObject": ".moderationobject", + "ModerationObjectTypedDict": ".moderationobject", + "ModerationResponse": ".moderationresponse", + "ModerationResponseTypedDict": ".moderationresponse", + "NoResponseError": ".no_response_error", + "OCRImageObject": ".ocrimageobject", + "OCRImageObjectTypedDict": ".ocrimageobject", + "OCRPageDimensions": ".ocrpagedimensions", + "OCRPageDimensionsTypedDict": ".ocrpagedimensions", + "OCRPageObject": ".ocrpageobject", + "OCRPageObjectTypedDict": ".ocrpageobject", + "Document": ".ocrrequest", + "DocumentTypedDict": ".ocrrequest", + "OCRRequest": ".ocrrequest", + "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", + "OCRResponse": ".ocrresponse", + "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": ".ocrtableobject", + "OCRUsageInfo": ".ocrusageinfo", + "OCRUsageInfoTypedDict": ".ocrusageinfo", + "OutputContentChunks": ".outputcontentchunks", + "OutputContentChunksTypedDict": ".outputcontentchunks", + "PaginationInfo": ".paginationinfo", + "PaginationInfoTypedDict": ".paginationinfo", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ProcessingStatusOut": ".processingstatusout", + "ProcessingStatusOutTypedDict": ".processingstatusout", + "RealtimeTranscriptionError": ".realtimetranscriptionerror", + "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", + "Message": ".realtimetranscriptionerrordetail", + "MessageTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionSession": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdatedTypedDict": 
".realtimetranscriptionsessionupdated", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "RequestSource": ".requestsource", + "ResponseDoneEvent": ".responsedoneevent", + "ResponseDoneEventType": ".responsedoneevent", + "ResponseDoneEventTypedDict": ".responsedoneevent", + "ResponseErrorEvent": ".responseerrorevent", + "ResponseErrorEventType": ".responseerrorevent", + "ResponseErrorEventTypedDict": ".responseerrorevent", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "ResponseStartedEvent": ".responsestartedevent", + "ResponseStartedEventType": ".responsestartedevent", + "ResponseStartedEventTypedDict": ".responsestartedevent", + "ResponseValidationError": ".responsevalidationerror", + "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveFileOut": ".retrievefileout", + "RetrieveFileOutTypedDict": ".retrievefileout", + "SampleType": ".sampletype", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + "ShareEnum": ".shareenum", + "SharingDelete": ".sharingdelete", + "SharingDeleteTypedDict": ".sharingdelete", + "SharingIn": ".sharingin", + "SharingInTypedDict": ".sharingin", + "SharingOut": ".sharingout", + "SharingOutTypedDict": ".sharingout", + "Source": ".source", + "SSETypes": ".ssetypes", + "Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkType": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "TimestampGranularity": ".timestampgranularity", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", + "ToolExecutionDoneEvent": ".toolexecutiondoneevent", + "ToolExecutionDoneEventName": ".toolexecutiondoneevent", + "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", + "ToolExecutionDoneEventType": ".toolexecutiondoneevent", + "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", + "Name": ".toolexecutionentry", + "NameTypedDict": ".toolexecutionentry", + "ToolExecutionEntry": ".toolexecutionentry", + "ToolExecutionEntryObject": ".toolexecutionentry", + "ToolExecutionEntryType": ".toolexecutionentry", + 
"ToolExecutionEntryTypedDict": ".toolexecutionentry", + "ToolExecutionStartedEvent": ".toolexecutionstartedevent", + "ToolExecutionStartedEventName": ".toolexecutionstartedevent", + "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", + "ToolExecutionStartedEventType": ".toolexecutionstartedevent", + "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", + "ToolFileChunk": ".toolfilechunk", + "ToolFileChunkTool": ".toolfilechunk", + "ToolFileChunkToolTypedDict": ".toolfilechunk", + "ToolFileChunkType": ".toolfilechunk", + "ToolFileChunkTypedDict": ".toolfilechunk", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolReferenceChunk": ".toolreferencechunk", + "ToolReferenceChunkTool": ".toolreferencechunk", + "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", + "ToolReferenceChunkType": ".toolreferencechunk", + "ToolReferenceChunkTypedDict": ".toolreferencechunk", + "ToolTypes": ".tooltypes", + "TrainingFile": ".trainingfile", + "TrainingFileTypedDict": ".trainingfile", + "TranscriptionResponse": ".transcriptionresponse", + "TranscriptionResponseTypedDict": ".transcriptionresponse", + "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", + "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", + "Type": ".transcriptionsegmentchunk", + "TranscriptionStreamDone": ".transcriptionstreamdone", + "TranscriptionStreamDoneType": ".transcriptionstreamdone", + "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", + "TranscriptionStreamEvents": ".transcriptionstreamevents", + "TranscriptionStreamEventsData": ".transcriptionstreamevents", + "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", + "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", + "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", + "UnarchiveFTModelOut": ".unarchiveftmodelout", + "UnarchiveFTModelOutObject": ".unarchiveftmodelout", + "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", + "UpdateFTModelIn": ".updateftmodelin", + "UpdateFTModelInTypedDict": ".updateftmodelin", + "UploadFileOut": ".uploadfileout", + "UploadFileOutTypedDict": ".uploadfileout", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageRole": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", + "WandbIntegration": ".wandbintegration", + "WandbIntegrationType": ".wandbintegration", + "WandbIntegrationTypedDict": ".wandbintegration", + 
"WandbIntegrationOut": ".wandbintegrationout", + "WandbIntegrationOutType": ".wandbintegrationout", + "WandbIntegrationOutTypedDict": ".wandbintegrationout", + "WebSearchPremiumTool": ".websearchpremiumtool", + "WebSearchPremiumToolType": ".websearchpremiumtool", + "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", + "WebSearchTool": ".websearchtool", + "WebSearchToolType": ".websearchtool", + "WebSearchToolTypedDict": ".websearchtool", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py new file mode 100644 index 00000000..3bedb3a3 --- /dev/null +++ b/src/mistralai/client/models/agent.py @@ -0,0 +1,148 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentToolsTypedDict = TypeAliasType( + "AgentToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +AgentObject = Literal["agent",] + + +class AgentTypedDict(TypedDict): + model: str + name: str + id: str + version: int + versions: List[int] + created_at: datetime + updated_at: datetime + deployment_chat: bool + source: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + object: NotRequired[AgentObject] + + +class Agent(BaseModel): + model: str + + name: str + + id: str + + version: int + + versions: List[int] + + created_at: datetime + + updated_at: datetime + + deployment_chat: bool + + source: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + object: Optional[AgentObject] = "agent" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "object", + ] + nullable_fields = ["instructions", "description", "handoffs", "metadata"] + null_default_fields = [] + + serialized = handler(self) + 
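# A descriptive reading of the emission rules implemented below (inferred from the + # generated logic, not separate Speakeasy documentation): fields still holding the + # UNSET sentinel are dropped from the payload entirely; nullable fields explicitly + # set to None are emitted as JSON null; required fields and concrete values are + # always emitted under their serialization alias. +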
+ m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentaliasresponse.py b/src/mistralai/client/models/agentaliasresponse.py new file mode 100644 index 00000000..4bc8225c --- /dev/null +++ b/src/mistralai/client/models/agentaliasresponse.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class AgentAliasResponseTypedDict(TypedDict): + alias: str + version: int + created_at: datetime + updated_at: datetime + + +class AgentAliasResponse(BaseModel): + alias: str + + version: int + + created_at: datetime + + updated_at: datetime diff --git a/src/mistralai/client/models/agentconversation.py b/src/mistralai/client/models/agentconversation.py new file mode 100644 index 00000000..5dfa8c31 --- /dev/null +++ b/src/mistralai/client/models/agentconversation.py @@ -0,0 +1,95 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AgentConversationObject = Literal["conversation",] + + +AgentConversationAgentVersionTypedDict = TypeAliasType( + "AgentConversationAgentVersionTypedDict", Union[str, int] +) + + +AgentConversationAgentVersion = TypeAliasType( + "AgentConversationAgentVersion", Union[str, int] +) + + +class AgentConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + agent_id: str + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + object: NotRequired[AgentConversationObject] + agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]] + + +class AgentConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + agent_id: str + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of what the conversation is about.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + object: Optional[AgentConversationObject] = "conversation" + + agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description", "metadata", "object", "agent_version"] + nullable_fields = ["name", "description", "metadata",
"agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentcreationrequest.py b/src/mistralai/client/models/agentcreationrequest.py new file mode 100644 index 00000000..61a5aff5 --- /dev/null +++ b/src/mistralai/client/models/agentcreationrequest.py @@ -0,0 +1,119 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentCreationRequestToolsTypedDict = TypeAliasType( + "AgentCreationRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentCreationRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class AgentCreationRequestTypedDict(TypedDict): + model: str + name: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentCreationRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentCreationRequest(BaseModel): + model: str + + name: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentCreationRequestTools]] = None + r"""List of tools which are available to the model during the 
conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + ] + nullable_fields = ["instructions", "description", "handoffs", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py new file mode 100644 index 00000000..c826aa5e --- /dev/null +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffDoneEventType = Literal["agent.handoff.done",] + + +class AgentHandoffDoneEventTypedDict(TypedDict): + id: str + next_agent_id: str + next_agent_name: str + type: NotRequired[AgentHandoffDoneEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffDoneEvent(BaseModel): + id: str + + next_agent_id: str + + next_agent_name: str + + type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/client/models/agenthandoffentry.py b/src/mistralai/client/models/agenthandoffentry.py new file mode 100644 index 00000000..0b0de13f --- /dev/null +++ b/src/mistralai/client/models/agenthandoffentry.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffEntryObject = Literal["entry",] + + +AgentHandoffEntryType = Literal["agent.handoff",] + + +class AgentHandoffEntryTypedDict(TypedDict): + previous_agent_id: str + previous_agent_name: str + next_agent_id: str + next_agent_name: str + object: NotRequired[AgentHandoffEntryObject] + type: NotRequired[AgentHandoffEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class AgentHandoffEntry(BaseModel): + previous_agent_id: str + + previous_agent_name: str + + next_agent_id: str + + next_agent_name: str + + object: Optional[AgentHandoffEntryObject] = "entry" + + type: Optional[AgentHandoffEntryType] = "agent.handoff" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py new file mode 100644 index 00000000..4b8ff1e5 --- /dev/null +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffStartedEventType = Literal["agent.handoff.started",] + + +class AgentHandoffStartedEventTypedDict(TypedDict): + id: str + previous_agent_id: str + previous_agent_name: str + type: NotRequired[AgentHandoffStartedEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffStartedEvent(BaseModel): + id: str + + previous_agent_id: str + + previous_agent_name: str + + type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py new file mode 100644 index 00000000..33da325c --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): + agent_id: str + alias: str + version: int + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + alias: Annotated[ + str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_deleteop.py b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py new file mode 100644 index 00000000..58fe902f --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsDeleteRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py new file mode 100644 index 00000000..edcccda1 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): + agent_id: str + version: str + + +class AgentsAPIV1AgentsGetVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/agents_api_v1_agents_getop.py new file mode 100644 index 00000000..d4817457 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_getop.py @@ -0,0 +1,68 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +QueryParamAgentVersionTypedDict = TypeAliasType( + "QueryParamAgentVersionTypedDict", Union[int, str] +) + + +QueryParamAgentVersion = TypeAliasType("QueryParamAgentVersion", Union[int, str]) + + +class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): + agent_id: str + agent_version: NotRequired[Nullable[QueryParamAgentVersionTypedDict]] + + +class AgentsAPIV1AgentsGetRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_version: Annotated[ + OptionalNullable[QueryParamAgentVersion], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["agent_version"] + nullable_fields = ["agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py new file mode 100644 index 00000000..b9770fff --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py new file mode 100644 index 00000000..813335f9 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): + agent_id: str + page: NotRequired[int] + r"""Page number (0-indexed)""" + page_size: NotRequired[int] + r"""Number of versions per page""" + + +class AgentsAPIV1AgentsListVersionsRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""Page number (0-indexed)""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 20 + r"""Number of versions per page""" diff --git a/src/mistralai/client/models/agents_api_v1_agents_listop.py b/src/mistralai/client/models/agents_api_v1_agents_listop.py new file mode 100644 index 00000000..119f5123 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_listop.py @@ -0,0 +1,104 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .requestsource import RequestSource +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""Page number (0-indexed)""" + page_size: NotRequired[int] + r"""Number of agents per page""" + deployment_chat: NotRequired[Nullable[bool]] + sources: NotRequired[Nullable[List[RequestSource]]] + name: NotRequired[Nullable[str]] + id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentsAPIV1AgentsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""Page number (0-indexed)""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 20 + r"""Number of agents per page""" + + deployment_chat: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sources: Annotated[ + OptionalNullable[List[RequestSource]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(serialization="json")), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "deployment_chat", + "sources", + "name", + "id", + "metadata", + ] + nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + 
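# Worked example, assuming the shared serializer pattern used throughout these + # generated models: AgentsAPIV1AgentsListRequest(name=None) emits name as an + # explicit null query parameter, while AgentsAPIV1AgentsListRequest() leaves name + # UNSET and omits it from the query; page and page_size fall back to their + # defaults (0 and 20) and are always emitted. +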
for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py new file mode 100644 index 00000000..116f952b --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): + agent_id: str + version: int + + +class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_updateop.py b/src/mistralai/client/models/agents_api_v1_agents_updateop.py new file mode 100644 index 00000000..116acaa7 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_updateop.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): + agent_id: str + agent_update_request: AgentUpdateRequestTypedDict + + +class AgentsAPIV1AgentsUpdateRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_update_request: Annotated[ + AgentUpdateRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py new file mode 100644 index 00000000..9f00ffd4 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_stream_request: ConversationAppendStreamRequestTypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_stream_request: Annotated[ + ConversationAppendStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_appendop.py b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py new file mode 100644 index 00000000..13d07ba9 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_request: ConversationAppendRequestTypedDict + + +class AgentsAPIV1ConversationsAppendRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_request: Annotated[ + ConversationAppendRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py new file mode 100644 index 00000000..81066f90 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to delete.""" + + +class AgentsAPIV1ConversationsDeleteRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to delete.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_getop.py b/src/mistralai/client/models/agents_api_v1_conversations_getop.py new file mode 100644 index 00000000..c919f99e --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_getop.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsGetRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" + + +AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) +r"""Successful Response""" + + +AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType( + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", + Union[AgentConversation, ModelConversation], +) +r"""Successful Response""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_historyop.py b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py new file mode 100644 index 00000000..ba1f8890 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching entries.""" + + +class AgentsAPIV1ConversationsHistoryRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching entries.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_listop.py b/src/mistralai/client/models/agents_api_v1_conversations_listop.py new file mode 100644 index 00000000..bb3c7127 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_listop.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentsAPIV1ConversationsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(serialization="json")), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["page", "page_size", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +ResponseBodyTypedDict = TypeAliasType( + "ResponseBodyTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) + + +ResponseBody = TypeAliasType( + "ResponseBody", Union[AgentConversation, ModelConversation] +) diff --git a/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py new file mode 100644 index 00000000..e05728f2 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching messages.""" + + +class AgentsAPIV1ConversationsMessagesRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching messages.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py new file mode 100644 index 00000000..9b489ab4 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the original conversation which is being restarted.""" + conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the original conversation which is being restarted.""" + + conversation_restart_stream_request: Annotated[ + ConversationRestartStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restartop.py b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py new file mode 100644 index 00000000..8bce3ce5 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the original conversation which is being restarted.""" + conversation_restart_request: ConversationRestartRequestTypedDict + + +class AgentsAPIV1ConversationsRestartRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the original conversation which is being restarted.""" + + conversation_restart_request: Annotated[ + ConversationRestartRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py new file mode 100644 index 00000000..22368e44 --- /dev/null +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -0,0 +1,198 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsCompletionRequestStopTypedDict = TypeAliasType( + "AgentsCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestStop = TypeAliasType( + "AgentsCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestMessagesTypedDict = TypeAliasType( + "AgentsCompletionRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +AgentsCompletionRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +AgentsCompletionRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) + + +AgentsCompletionRequestToolChoice = TypeAliasType( + "AgentsCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) + + +class AgentsCompletionRequestTypedDict(TypedDict): + messages: List[AgentsCompletionRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[AgentsCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class AgentsCompletionRequest(BaseModel): + messages: List[AgentsCompletionRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + agent_id: str + r"""The ID of the agent to use for this completion.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[AgentsCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + + tool_choice: Optional[AgentsCompletionRequestToolChoice] = None + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py new file mode 100644 index 00000000..37d46c79 --- /dev/null +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -0,0 +1,196 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsCompletionStreamRequestStopTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestStop = TypeAliasType( + "AgentsCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestMessagesTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +AgentsCompletionStreamRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +AgentsCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) + + +AgentsCompletionStreamRequestToolChoice = TypeAliasType( + "AgentsCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) + + +class AgentsCompletionStreamRequestTypedDict(TypedDict): + messages: List[AgentsCompletionStreamRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class AgentsCompletionStreamRequest(BaseModel): + messages: List[AgentsCompletionStreamRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + agent_id: str + r"""The ID of the agent to use for this completion.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[AgentsCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + + tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentupdaterequest.py b/src/mistralai/client/models/agentupdaterequest.py new file mode 100644 index 00000000..261ac069 --- /dev/null +++ b/src/mistralai/client/models/agentupdaterequest.py @@ -0,0 +1,133 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentUpdateRequestToolsTypedDict = TypeAliasType( + "AgentUpdateRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentUpdateRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class AgentUpdateRequestTypedDict(TypedDict): + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" 
+ model: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + deployment_chat: NotRequired[Nullable[bool]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentUpdateRequest(BaseModel): + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentUpdateRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + model: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + deployment_chat: OptionalNullable[bool] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + ] + nullable_fields = [ + "instructions", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/apiendpoint.py b/src/mistralai/client/models/apiendpoint.py new file mode 100644 index 00000000..a6072d56 --- /dev/null +++ b/src/mistralai/client/models/apiendpoint.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +APIEndpoint = Union[ + Literal[ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/fim/completions", + "/v1/moderations", + "/v1/chat/moderations", + "/v1/ocr", + "/v1/classifications", + "/v1/chat/classifications", + "/v1/conversations", + "/v1/audio/transcriptions", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/archiveftmodelout.py b/src/mistralai/client/models/archiveftmodelout.py new file mode 100644 index 00000000..6108c7e1 --- /dev/null +++ b/src/mistralai/client/models/archiveftmodelout.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
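A minimal usage sketch for the AgentUpdateRequest model above, showing how the UNSET sentinel separates omitted fields from explicit nulls; the import path follows this patch's file layout, and the field values are placeholders.

```python
from mistralai.client.models.agentupdaterequest import AgentUpdateRequest

# Only `instructions` was set, so the wrap serializer emits just that field.
partial = AgentUpdateRequest(instructions="Answer in French.")

# An explicit None lands in __pydantic_fields_set__, so the nullable
# `description` survives serialization as a deliberate null.
nulling = AgentUpdateRequest(instructions="Answer in French.", description=None)

print(partial.model_dump())  # expected: {'instructions': 'Answer in French.'}
print(nulling.model_dump())  # expected: adds 'description': None
```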
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ArchiveFTModelOutObject = Literal["model",] + + +class ArchiveFTModelOutTypedDict(TypedDict): + id: str + object: NotRequired[ArchiveFTModelOutObject] + archived: NotRequired[bool] + + +class ArchiveFTModelOut(BaseModel): + id: str + + object: Optional[ArchiveFTModelOutObject] = "model" + + archived: Optional[bool] = True diff --git a/src/mistralai/client/models/assistantmessage.py b/src/mistralai/client/models/assistantmessage.py new file mode 100644 index 00000000..3ba14ce7 --- /dev/null +++ b/src/mistralai/client/models/assistantmessage.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) + + +AssistantMessageRole = Literal["assistant",] + + +class AssistantMessageTypedDict(TypedDict): + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: NotRequired[AssistantMessageRole] + + +class AssistantMessage(BaseModel): + content: OptionalNullable[AssistantMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + + role: Optional[AssistantMessageRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["content", "tool_calls", "prefix", "role"] + nullable_fields = ["content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py new file mode 100644 index 00000000..80d836f2 --- /dev/null +++ b/src/mistralai/client/models/audiochunk.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AudioChunkType = Literal["input_audio",] + + +class AudioChunkTypedDict(TypedDict): + input_audio: str + type: NotRequired[AudioChunkType] + + +class AudioChunk(BaseModel): + input_audio: str + + type: Optional[AudioChunkType] = "input_audio" diff --git a/src/mistralai/client/models/audioencoding.py b/src/mistralai/client/models/audioencoding.py new file mode 100644 index 00000000..557f53ed --- /dev/null +++ b/src/mistralai/client/models/audioencoding.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +AudioEncoding = Union[ + Literal[ + "pcm_s16le", + "pcm_s32le", + "pcm_f16le", + "pcm_f32le", + "pcm_mulaw", + "pcm_alaw", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/audioformat.py b/src/mistralai/client/models/audioformat.py new file mode 100644 index 00000000..7ea10b3a --- /dev/null +++ b/src/mistralai/client/models/audioformat.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .audioencoding import AudioEncoding +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class AudioFormatTypedDict(TypedDict): + encoding: AudioEncoding + sample_rate: int + + +class AudioFormat(BaseModel): + encoding: AudioEncoding + + sample_rate: int diff --git a/src/mistralai/client/models/audiotranscriptionrequest.py b/src/mistralai/client/models/audiotranscriptionrequest.py new file mode 100644 index 00000000..78a37978 --- /dev/null +++ b/src/mistralai/client/models/audiotranscriptionrequest.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
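A short sketch of the `prefix` flag documented on AssistantMessage above, assuming UserMessage accepts `content` the same way; the message text is illustrative.

```python
from mistralai.client.models.assistantmessage import AssistantMessage
from mistralai.client.models.usermessage import UserMessage

messages = [
    UserMessage(content="Name three French cheeses."),
    # prefix=True conditions the model to continue from this partial answer.
    AssistantMessage(content="1. Comté, 2.", prefix=True),
]
```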
DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from .timestampgranularity import TimestampGranularity +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata, validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioTranscriptionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to be used.""" + file: NotRequired[FileTypedDict] + file_url: NotRequired[Nullable[str]] + r"""Url of a file to be transcribed""" + file_id: NotRequired[Nullable[str]] + r"""ID of a file uploaded to /v1/files""" + language: NotRequired[Nullable[str]] + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[False] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequest(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + r"""ID of the model to be used.""" + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + STREAM: Annotated[ + Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = False + + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + nullable_fields = ["file_url", "file_id", "language", "temperature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/audiotranscriptionrequeststream.py b/src/mistralai/client/models/audiotranscriptionrequeststream.py new file mode 100644 index 00000000..35064361 --- /dev/null +++ b/src/mistralai/client/models/audiotranscriptionrequeststream.py @@ -0,0 +1,111 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from .timestampgranularity import TimestampGranularity +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata, validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioTranscriptionRequestStreamTypedDict(TypedDict): + model: str + file: NotRequired[FileTypedDict] + file_url: NotRequired[Nullable[str]] + r"""Url of a file to be transcribed""" + file_id: NotRequired[Nullable[str]] + r"""ID of a file uploaded to /v1/files""" + language: NotRequired[Nullable[str]] + r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[True] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequestStream(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + STREAM: Annotated[ + Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = True + + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + nullable_fields = ["file_url", "file_id", "language", "temperature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py new file mode 100644 index 00000000..8ce7f139 --- /dev/null +++ b/src/mistralai/client/models/basemodelcard.py @@ -0,0 +1,116 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
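A sketch of AudioTranscriptionRequestStream above, with placeholder model and URL values; the pinned `stream: Literal[True]` field defaults to True, so it is never passed explicitly.

```python
from mistralai.client.models.audiotranscriptionrequeststream import (
    AudioTranscriptionRequestStream,
)

req = AudioTranscriptionRequestStream(
    model="voxtral-mini-latest",                # placeholder model ID
    file_url="https://example.com/meeting.mp3", # or file_id from /v1/files
    language="en",                              # optional accuracy hint
)
```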
DO NOT EDIT.""" + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +BaseModelCardType = Literal["base",] + + +class BaseModelCardTypedDict(TypedDict): + id: str + capabilities: ModelCapabilitiesTypedDict + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] + default_model_temperature: NotRequired[Nullable[float]] + type: BaseModelCardType + + +class BaseModelCard(BaseModel): + id: str + + capabilities: ModelCapabilities + + object: Optional[str] = "model" + + created: Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + deprecation_replacement_model: OptionalNullable[str] = UNSET + + default_model_temperature: OptionalNullable[float] = UNSET + + TYPE: Annotated[ + Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], + pydantic.Field(alias="type"), + ] = "base" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + "type", + ] + nullable_fields = [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/batcherror.py b/src/mistralai/client/models/batcherror.py new file mode 100644 index 00000000..a9c8362b --- /dev/null +++ b/src/mistralai/client/models/batcherror.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
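A sketch over BaseModelCard above that collects the IDs of models with an announced deprecation date, assuming the API returns timezone-aware datetimes.

```python
from datetime import datetime, timezone
from typing import List

from mistralai.client.models.basemodelcard import BaseModelCard


def deprecated_ids(cards: List[BaseModelCard]) -> List[str]:
    now = datetime.now(timezone.utc)
    # `deprecation` is UNSET/None when no sunset date has been announced;
    # the isinstance check filters both out before comparing.
    return [
        c.id
        for c in cards
        if isinstance(c.deprecation, datetime) and c.deprecation <= now
    ]
```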
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchErrorTypedDict(TypedDict): + message: str + count: NotRequired[int] + + +class BatchError(BaseModel): + message: str + + count: Optional[int] = 1 diff --git a/src/mistralai/client/models/batchjobin.py b/src/mistralai/client/models/batchjobin.py new file mode 100644 index 00000000..39cf70b5 --- /dev/null +++ b/src/mistralai/client/models/batchjobin.py @@ -0,0 +1,88 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .apiendpoint import APIEndpoint +from .batchrequest import BatchRequest, BatchRequestTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchJobInTypedDict(TypedDict): + endpoint: APIEndpoint + input_files: NotRequired[Nullable[List[str]]] + r"""The list of input files to be used for batch inference; these files should be `jsonl` files, containing the input data corresponding to the request body for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + requests: NotRequired[Nullable[List[BatchRequestTypedDict]]] + model: NotRequired[Nullable[str]] + r"""The model to be used for batch inference.""" + agent_id: NotRequired[Nullable[str]] + r"""In case you want to use a specific agent from the **deprecated** agents API for batch inference, you can specify the agent ID here.""" + metadata: NotRequired[Nullable[Dict[str, str]]] + r"""The metadata of your choice to be associated with the batch inference job.""" + timeout_hours: NotRequired[int] + r"""The timeout in hours for the batch inference job.""" + + +class BatchJobIn(BaseModel): + endpoint: APIEndpoint + + input_files: OptionalNullable[List[str]] = UNSET + r"""The list of input files to be used for batch inference; these files should be `jsonl` files, containing the input data corresponding to the request body for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + + requests: OptionalNullable[List[BatchRequest]] = UNSET + + model: OptionalNullable[str] = UNSET + r"""The model to be used for batch inference.""" + + agent_id: OptionalNullable[str] = UNSET + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" + + metadata: OptionalNullable[Dict[str, str]] = UNSET + r"""The metadata of your choice to be associated with the batch inference job.""" + + timeout_hours: Optional[int] = 24 + r"""The timeout in hours for the batch inference job.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "input_files", + "requests", + "model", + "agent_id", + "metadata", + "timeout_hours", + ] + nullable_fields = ["input_files", "requests", "model", "agent_id", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/batchjobout.py b/src/mistralai/client/models/batchjobout.py new file mode 100644 index 00000000..008d43b4 --- /dev/null +++ b/src/mistralai/client/models/batchjobout.py @@ -0,0 +1,129 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
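A sketch of BatchJobIn above, declaring a batch job over a previously uploaded JSONL file in the format its docstring shows; the file and model IDs are placeholders, and the endpoint is one of the APIEndpoint literals defined earlier in this patch.

```python
from mistralai.client.models.batchjobin import BatchJobIn

job = BatchJobIn(
    endpoint="/v1/chat/completions",
    input_files=["file-abc123"],            # placeholder file ID
    model="mistral-small-latest",           # placeholder model ID
    metadata={"project": "cheese-survey"},  # free-form job metadata
    timeout_hours=24,
)
```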
DO NOT EDIT.""" + +from __future__ import annotations +from .batcherror import BatchError, BatchErrorTypedDict +from .batchjobstatus import BatchJobStatus +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +BatchJobOutObject = Literal["batch",] + + +class BatchJobOutTypedDict(TypedDict): + id: str + input_files: List[str] + endpoint: str + errors: List[BatchErrorTypedDict] + status: BatchJobStatus + created_at: int + total_requests: int + completed_requests: int + succeeded_requests: int + failed_requests: int + object: NotRequired[BatchJobOutObject] + metadata: NotRequired[Nullable[Dict[str, Any]]] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + output_file: NotRequired[Nullable[str]] + error_file: NotRequired[Nullable[str]] + outputs: NotRequired[Nullable[List[Dict[str, Any]]]] + started_at: NotRequired[Nullable[int]] + completed_at: NotRequired[Nullable[int]] + + +class BatchJobOut(BaseModel): + id: str + + input_files: List[str] + + endpoint: str + + errors: List[BatchError] + + status: BatchJobStatus + + created_at: int + + total_requests: int + + completed_requests: int + + succeeded_requests: int + + failed_requests: int + + object: Optional[BatchJobOutObject] = "batch" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + output_file: OptionalNullable[str] = UNSET + + error_file: OptionalNullable[str] = UNSET + + outputs: OptionalNullable[List[Dict[str, Any]]] = UNSET + + started_at: OptionalNullable[int] = UNSET + + completed_at: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + nullable_fields = [ + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/batchjobsout.py b/src/mistralai/client/models/batchjobsout.py new file mode 100644 index 00000000..2654dac0 --- /dev/null +++ b/src/mistralai/client/models/batchjobsout.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
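A sketch of a progress readout over BatchJobOut above; the counter fields are required on the model, so only a zero total needs guarding.

```python
from mistralai.client.models.batchjobout import BatchJobOut


def progress(job: BatchJobOut) -> str:
    # total_requests is required, but may be zero for a freshly queued job.
    pct = (
        100 * job.completed_requests / job.total_requests
        if job.total_requests
        else 0.0
    )
    return f"{job.status}: {job.completed_requests}/{job.total_requests} ({pct:.0f}%)"
```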
DO NOT EDIT.""" + +from __future__ import annotations +from .batchjobout import BatchJobOut, BatchJobOutTypedDict +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +BatchJobsOutObject = Literal["list",] + + +class BatchJobsOutTypedDict(TypedDict): + total: int + data: NotRequired[List[BatchJobOutTypedDict]] + object: NotRequired[BatchJobsOutObject] + + +class BatchJobsOut(BaseModel): + total: int + + data: Optional[List[BatchJobOut]] = None + + object: Optional[BatchJobsOutObject] = "list" diff --git a/src/mistralai/client/models/batchjobstatus.py b/src/mistralai/client/models/batchjobstatus.py new file mode 100644 index 00000000..4b28059b --- /dev/null +++ b/src/mistralai/client/models/batchjobstatus.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +BatchJobStatus = Literal[ + "QUEUED", + "RUNNING", + "SUCCESS", + "FAILED", + "TIMEOUT_EXCEEDED", + "CANCELLATION_REQUESTED", + "CANCELLED", +] diff --git a/src/mistralai/client/models/batchrequest.py b/src/mistralai/client/models/batchrequest.py new file mode 100644 index 00000000..24f50a9a --- /dev/null +++ b/src/mistralai/client/models/batchrequest.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class BatchRequestTypedDict(TypedDict): + body: Dict[str, Any] + custom_id: NotRequired[Nullable[str]] + + +class BatchRequest(BaseModel): + body: Dict[str, Any] + + custom_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["custom_id"] + nullable_fields = ["custom_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/builtinconnectors.py b/src/mistralai/client/models/builtinconnectors.py new file mode 100644 index 00000000..6a3b2476 --- /dev/null +++ b/src/mistralai/client/models/builtinconnectors.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
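A sketch of the inline alternative to `input_files`: per-request bodies built with BatchRequest above, feeding the `requests` field on BatchJobIn; the body mirrors the JSONL example in that model's docstring.

```python
from mistralai.client.models.batchrequest import BatchRequest

inline_requests = [
    BatchRequest(
        custom_id="0",  # optional correlation ID echoed back in the output
        body={
            "max_tokens": 100,
            "messages": [
                {"role": "user", "content": "What is the best French cheese?"}
            ],
        },
    ),
]
```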
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +BuiltInConnectors = Literal[ + "web_search", + "web_search_premium", + "code_interpreter", + "image_generation", + "document_library", +] diff --git a/src/mistralai/client/models/chatclassificationrequest.py b/src/mistralai/client/models/chatclassificationrequest.py new file mode 100644 index 00000000..45081022 --- /dev/null +++ b/src/mistralai/client/models/chatclassificationrequest.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .inputs import Inputs, InputsTypedDict +from mistralai.client.types import BaseModel +import pydantic +from typing_extensions import Annotated, TypedDict + + +class ChatClassificationRequestTypedDict(TypedDict): + model: str + inputs: InputsTypedDict + r"""Chat to classify""" + + +class ChatClassificationRequest(BaseModel): + model: str + + inputs: Annotated[Inputs, pydantic.Field(alias="input")] + r"""Chat to classify""" diff --git a/src/mistralai/client/models/chatcompletionchoice.py b/src/mistralai/client/models/chatcompletionchoice.py new file mode 100644 index 00000000..5d888cfd --- /dev/null +++ b/src/mistralai/client/models/chatcompletionchoice.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.client.types import BaseModel, UnrecognizedStr +from typing import Literal, Union +from typing_extensions import TypedDict + + +FinishReason = Union[ + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + message: AssistantMessageTypedDict + finish_reason: FinishReason + + +class ChatCompletionChoice(BaseModel): + index: int + + message: AssistantMessage + + finish_reason: FinishReason diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py new file mode 100644 index 00000000..30fce28d --- /dev/null +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -0,0 +1,221 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = TypeAliasType("Stop", Union[str, List[str]]) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = TypeAliasType( + "MessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +Messages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[MessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[StopTypedDict] + r"""Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, repeated calls with the same seed will produce deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text.
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request; input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between reasoning mode and no system prompt. When set to `reasoning`, the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[Messages] + r"""The prompt(s) to generate completions for, encoded as a list of dicts with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use; we recommend a value between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[Stop] = None + r"""Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, repeated calls with the same seed will produce deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request; input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between reasoning mode and no system prompt.
When set to `reasoning`, the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/chatcompletionresponse.py b/src/mistralai/client/models/chatcompletionresponse.py new file mode 100644 index 00000000..60a1f561 --- /dev/null +++ b/src/mistralai/client/models/chatcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class ChatCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice]
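The two models above pair up in the obvious way: build a ChatCompletionRequest, send it, parse a ChatCompletionResponse. As a minimal usage sketch (not part of the generated patch, assuming the module paths follow the neighboring files and that UserMessage accepts plain-string content as in the published SDK; the model id is a placeholder):

    from mistralai.client.models.chatcompletionrequest import ChatCompletionRequest
    from mistralai.client.models.usermessage import UserMessage

    req = ChatCompletionRequest(
        model="mistral-small-latest",  # placeholder model id
        messages=[UserMessage(content="Hello!")],
        temperature=0.2,
    )
    payload = req.model_dump(by_alias=True)
    # The serialize_model hook above drops UNSET optionals (max_tokens, tools, ...)
    # but keeps concrete defaults, so payload still carries stream=False.

Note that explicitly setting a nullable field to None (for example max_tokens=None) survives as null in the payload; that absent-versus-null distinction is exactly what the UNSET sentinel encodes.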
diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..21dad38b --- /dev/null +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -0,0 +1,223 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.""" + + +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.""" + + +ChatCompletionStreamRequestMessagesTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionStreamRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dicts with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use; we recommend a value between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, repeated calls with the same seed will produce deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text.
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request; input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between reasoning mode and no system prompt. When set to `reasoning`, the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionStreamRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dicts with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use; we recommend a value between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected, or if one of these tokens is detected when an array is provided.""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, repeated calls with the same seed will produce deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call.
Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request; input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use; when enabled, the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between reasoning mode and no system prompt. When set to `reasoning`, the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m
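For the streaming variant, the only schema-level difference from ChatCompletionRequest is the `stream` default. A quick sketch, again outside the generated patch and with a placeholder model id:

    from mistralai.client.models.chatcompletionstreamrequest import ChatCompletionStreamRequest
    from mistralai.client.models.usermessage import UserMessage

    req = ChatCompletionStreamRequest(
        model="mistral-small-latest",  # placeholder model id
        messages=[UserMessage(content="Write a haiku about the sea.")],
    )
    # stream defaults to True here (it defaults to False on ChatCompletionRequest),
    # so the serialized payload asks the server for data-only server-sent events.
    assert req.stream is True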
diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py new file mode 100644 index 00000000..631c914d --- /dev/null +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +import pydantic +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +TwoTypedDict = TypeAliasType( + "TwoTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +Two = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +OneTypedDict = TypeAliasType( + "OneTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +One = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatModerationRequestInputsTypedDict = TypeAliasType( + "ChatModerationRequestInputsTypedDict", + Union[List[OneTypedDict], List[List[TwoTypedDict]]], +) +r"""Chat to classify""" + + +ChatModerationRequestInputs = TypeAliasType( + "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] +) +r"""Chat to classify""" + + +class ChatModerationRequestTypedDict(TypedDict): + inputs: ChatModerationRequestInputsTypedDict + r"""Chat to classify""" + model: str + + +class ChatModerationRequest(BaseModel): + inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] + r"""Chat to classify""" + + model: str diff --git a/src/mistralai/client/models/checkpointout.py b/src/mistralai/client/models/checkpointout.py new file mode 100644 index 00000000..89189ed1 --- /dev/null +++ b/src/mistralai/client/models/checkpointout.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .metricout import MetricOut, MetricOutTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CheckpointOutTypedDict(TypedDict): + metrics: MetricOutTypedDict + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int + r"""The step number that the checkpoint was created at.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" + + +class CheckpointOut(BaseModel): + metrics: MetricOut + r"""Metrics at the step number during the fine-tuning job.
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + step_number: int + r"""The step number that the checkpoint was created at.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" diff --git a/src/mistralai/client/models/classificationrequest.py b/src/mistralai/client/models/classificationrequest.py new file mode 100644 index 00000000..c724ff53 --- /dev/null +++ b/src/mistralai/client/models/classificationrequest.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, List, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ClassificationRequestInputsTypedDict = TypeAliasType( + "ClassificationRequestInputsTypedDict", Union[str, List[str]] +) +r"""Text to classify.""" + + +ClassificationRequestInputs = TypeAliasType( + "ClassificationRequestInputs", Union[str, List[str]] +) +r"""Text to classify.""" + + +class ClassificationRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" + inputs: ClassificationRequestInputsTypedDict + r"""Text to classify.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class ClassificationRequest(BaseModel): + model: str + r"""ID of the model to use.""" + + inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] + r"""Text to classify.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classificationresponse.py b/src/mistralai/client/models/classificationresponse.py new file mode 100644 index 00000000..4bc21a58 --- /dev/null +++ b/src/mistralai/client/models/classificationresponse.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, +) +from mistralai.client.types import BaseModel +from typing import Dict, List +from typing_extensions import TypedDict + + +class ClassificationResponseTypedDict(TypedDict): + id: str + model: str + results: List[Dict[str, ClassificationTargetResultTypedDict]] + + +class ClassificationResponse(BaseModel): + id: str + + model: str + + results: List[Dict[str, ClassificationTargetResult]] diff --git a/src/mistralai/client/models/classificationtargetresult.py b/src/mistralai/client/models/classificationtargetresult.py new file mode 100644 index 00000000..89a137c3 --- /dev/null +++ b/src/mistralai/client/models/classificationtargetresult.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Dict +from typing_extensions import TypedDict + + +class ClassificationTargetResultTypedDict(TypedDict): + scores: Dict[str, float] + + +class ClassificationTargetResult(BaseModel): + scores: Dict[str, float] diff --git a/src/mistralai/client/models/classifierdetailedjobout.py b/src/mistralai/client/models/classifierdetailedjobout.py new file mode 100644 index 00000000..1de4534f --- /dev/null +++ b/src/mistralai/client/models/classifierdetailedjobout.py @@ -0,0 +1,164 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .eventout import EventOut, EventOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierDetailedJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] + + +ClassifierDetailedJobOutObject = Literal["job",] + + +ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +ClassifierDetailedJobOutIntegrations = WandbIntegrationOut + + +ClassifierDetailedJobOutJobType = Literal["classifier",] + + +class ClassifierDetailedJobOutTypedDict(TypedDict): + id: str + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: ClassifierDetailedJobOutStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: ClassifierTrainingParametersTypedDict + classifier_targets: List[ClassifierTargetOutTypedDict] + validation_files: NotRequired[Nullable[List[str]]] + object: NotRequired[ClassifierDetailedJobOutObject] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] + ] + trained_tokens: 
NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[ClassifierDetailedJobOutJobType] + events: NotRequired[List[EventOutTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointOutTypedDict]] + + +class ClassifierDetailedJobOut(BaseModel): + id: str + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: ClassifierDetailedJobOutStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: ClassifierTrainingParameters + + classifier_targets: List[ClassifierTargetOut] + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Optional[ClassifierDetailedJobOutObject] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" + + events: Optional[List[EventOut]] = None + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[CheckpointOut]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + "events", + "checkpoints", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifierftmodelout.py b/src/mistralai/client/models/classifierftmodelout.py new file mode 100644 index 00000000..a4572108 --- /dev/null +++ b/src/mistralai/client/models/classifierftmodelout.py @@ -0,0 +1,114 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierFTModelOutObject = Literal["model",] + + +ClassifierFTModelOutModelType = Literal["classifier",] + + +class ClassifierFTModelOutTypedDict(TypedDict): + id: str + created: int + owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool + capabilities: FTModelCapabilitiesOutTypedDict + job: str + classifier_targets: List[ClassifierTargetOutTypedDict] + object: NotRequired[ClassifierFTModelOutObject] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: NotRequired[ClassifierFTModelOutModelType] + + +class ClassifierFTModelOut(BaseModel): + id: str + + created: int + + owned_by: str + + workspace_id: str + + root: str + + root_version: str + + archived: bool + + capabilities: FTModelCapabilitiesOut + + job: str + + classifier_targets: List[ClassifierTargetOut] + + object: Optional[ClassifierFTModelOutObject] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Optional[ClassifierFTModelOutModelType] = "classifier" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "name", + "description", + "max_context_length", + "aliases", + "model_type", + ] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifierjobout.py b/src/mistralai/client/models/classifierjobout.py new file mode 100644 index 00000000..ab1e261d --- /dev/null +++ b/src/mistralai/client/models/classifierjobout.py @@ -0,0 +1,173 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current status of the fine-tuning job.""" + + +ClassifierJobOutObject = Literal["job",] +r"""The object type of the fine-tuning job.""" + + +ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +ClassifierJobOutIntegrations = WandbIntegrationOut + + +ClassifierJobOutJobType = Literal["classifier",] +r"""The type of fine-tuning job (`classifier`).""" + + +class ClassifierJobOutTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: ClassifierJobOutStatus + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: ClassifierTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: NotRequired[ClassifierJobOutObject] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""An optional string appended to the name of the resulting fine-tuned model."""
+ integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[ClassifierJobOutJobType] + r"""The type of fine-tuning job (`classifier`).""" + + +class ClassifierJobOut(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: ClassifierJobOutStatus + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: ClassifierTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Optional[ClassifierJobOutObject] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""An optional string appended to the name of the resulting fine-tuned model.""" + + integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[ClassifierJobOutJobType] = "classifier" + r"""The type of fine-tuning job (`classifier`).""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m
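Downstream code typically polls a fine-tuning job until its status reaches a terminal state, and ClassifierJobOut is what each poll parses. A validation sketch with placeholder ids, model name, and timestamps (none of these values come from the patch):

    from mistralai.client.models.classifierjobout import ClassifierJobOut

    job = ClassifierJobOut.model_validate({
        "id": "job-0123",                  # placeholder values throughout
        "auto_start": True,
        "model": "ministral-3b-latest",
        "status": "RUNNING",
        "created_at": 1735689600,
        "modified_at": 1735693200,
        "training_files": ["file-abc123"],
        "hyperparameters": {"learning_rate": 0.0001},
    })
    # Literal defaults fill in the rest: job.object == "job" and
    # job.job_type == "classifier"; unset nullable fields remain UNSET.
    print(job.status, job.job_type)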
diff --git a/src/mistralai/client/models/classifiertargetin.py b/src/mistralai/client/models/classifiertargetin.py new file mode 100644 index 00000000..231ee21e --- /dev/null +++ b/src/mistralai/client/models/classifiertargetin.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTargetInTypedDict(TypedDict): + name: str + labels: List[str] + weight: NotRequired[float] + loss_function: NotRequired[Nullable[FTClassifierLossFunction]] + + +class ClassifierTargetIn(BaseModel): + name: str + + labels: List[str] + + weight: Optional[float] = 1 + + loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["weight", "loss_function"] + nullable_fields = ["loss_function"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifiertargetout.py b/src/mistralai/client/models/classifiertargetout.py new file mode 100644 index 00000000..957104a7 --- /dev/null +++ b/src/mistralai/client/models/classifiertargetout.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ClassifierTargetOutTypedDict(TypedDict): + name: str + labels: List[str] + weight: float + loss_function: FTClassifierLossFunction + + +class ClassifierTargetOut(BaseModel): + name: str + + labels: List[str] + + weight: float + + loss_function: FTClassifierLossFunction diff --git a/src/mistralai/client/models/classifiertrainingparameters.py b/src/mistralai/client/models/classifiertrainingparameters.py new file mode 100644 index 00000000..60f53c37 --- /dev/null +++ b/src/mistralai/client/models/classifiertrainingparameters.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifiertrainingparametersin.py b/src/mistralai/client/models/classifiertrainingparametersin.py new file mode 100644 index 00000000..e24c9dde --- /dev/null +++ b/src/mistralai/client/models/classifiertrainingparametersin.py @@ -0,0 +1,91 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersInTypedDict(TypedDict): + r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" + + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: NotRequired[float] + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: NotRequired[Nullable[float]] + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: NotRequired[Nullable[float]] + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParametersIn(BaseModel): + r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + + learning_rate: Optional[float] = 0.0001 + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + + weight_decay: OptionalNullable[float] = UNSET + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + + warmup_fraction: OptionalNullable[float] = UNSET + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py new file mode 100644 index 00000000..faf5b0b7 --- /dev/null +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +CodeInterpreterToolType = Literal["code_interpreter",] + + +class CodeInterpreterToolTypedDict(TypedDict): + type: NotRequired[CodeInterpreterToolType] + + +class CodeInterpreterTool(BaseModel): + type: Optional[CodeInterpreterToolType] = "code_interpreter" diff --git a/src/mistralai/client/models/completionargs.py b/src/mistralai/client/models/completionargs.py new file mode 100644 index 00000000..010910f6 --- /dev/null +++ b/src/mistralai/client/models/completionargs.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .toolchoiceenum import ToolChoiceEnum +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionArgsTypedDict(TypedDict): + r"""White-listed arguments from the completion API""" + + stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] + presence_penalty: NotRequired[Nullable[float]] + frequency_penalty: NotRequired[Nullable[float]] + temperature: NotRequired[Nullable[float]] + top_p: NotRequired[Nullable[float]] + max_tokens: NotRequired[Nullable[int]] + random_seed: NotRequired[Nullable[int]] + prediction: NotRequired[Nullable[PredictionTypedDict]] + response_format: NotRequired[Nullable[ResponseFormatTypedDict]] + tool_choice: NotRequired[ToolChoiceEnum] + + +class CompletionArgs(BaseModel): + r"""White-listed arguments from the completion API""" + + stop: OptionalNullable[CompletionArgsStop] = UNSET + + presence_penalty: OptionalNullable[float] = UNSET + + frequency_penalty: OptionalNullable[float] = UNSET + + temperature: OptionalNullable[float] = UNSET + + top_p: OptionalNullable[float] = UNSET + + max_tokens: OptionalNullable[int] = UNSET + + random_seed: OptionalNullable[int] = UNSET + + prediction: OptionalNullable[Prediction] = UNSET + + response_format: OptionalNullable[ResponseFormat] = UNSET + + tool_choice: Optional[ToolChoiceEnum] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + "tool_choice", + ] + nullable_fields = [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m 
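The UNSET/None distinction that every serialize_model hook in this patch encodes is easiest to see on a small model such as CompletionArgs. A minimal sketch, assuming the UNSET sentinel handling in mistralai.client.types behaves as in the published SDK:

    from mistralai.client.models.completionargs import CompletionArgs

    args = CompletionArgs(temperature=0.2, max_tokens=None)
    print(args.model_dump(by_alias=True))
    # -> {'temperature': 0.2, 'max_tokens': None}
    # temperature was set to a value, max_tokens was explicitly nulled (kept
    # as None), and every field left UNSET (stop, top_p, ...) is dropped.

This three-way state (absent, null, value) is why the nullable fields default to the UNSET sentinel rather than to None.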
diff --git a/src/mistralai/client/models/completionargsstop.py b/src/mistralai/client/models/completionargsstop.py new file mode 100644 index 00000000..de7a0956 --- /dev/null +++ b/src/mistralai/client/models/completionargsstop.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import List, Union +from typing_extensions import TypeAliasType + + +CompletionArgsStopTypedDict = TypeAliasType( + "CompletionArgsStopTypedDict", Union[str, List[str]] +) + + +CompletionArgsStop = TypeAliasType("CompletionArgsStop", Union[str, List[str]]) diff --git a/src/mistralai/client/models/completionchunk.py b/src/mistralai/client/models/completionchunk.py new file mode 100644 index 00000000..9790db6f --- /dev/null +++ b/src/mistralai/client/models/completionchunk.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + + model: str + + choices: List[CompletionResponseStreamChoice] + + object: Optional[str] = None + + created: Optional[int] = None + + usage: Optional[UsageInfo] = None diff --git a/src/mistralai/client/models/completiondetailedjobout.py b/src/mistralai/client/models/completiondetailedjobout.py new file mode 100644 index 00000000..85c0c803 --- /dev/null +++ b/src/mistralai/client/models/completiondetailedjobout.py @@ -0,0 +1,171 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .eventout import EventOut, EventOutTypedDict +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +CompletionDetailedJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] + + +CompletionDetailedJobOutObject = Literal["job",] + + +CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +CompletionDetailedJobOutIntegrations = WandbIntegrationOut + + +CompletionDetailedJobOutJobType = Literal["completion",] + + +CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict + + +CompletionDetailedJobOutRepositories = GithubRepositoryOut + + +class CompletionDetailedJobOutTypedDict(TypedDict): + id: str + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: CompletionDetailedJobOutStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + object: NotRequired[CompletionDetailedJobOutObject] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[CompletionDetailedJobOutJobType] + repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] + events: NotRequired[List[EventOutTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointOutTypedDict]] + + +class CompletionDetailedJobOut(BaseModel): + id: str + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: CompletionDetailedJobOutStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Optional[CompletionDetailedJobOutObject] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[CompletionDetailedJobOutJobType] = "completion" + + repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None + + events: Optional[List[EventOut]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[CheckpointOut]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + "repositories", + "events", + "checkpoints", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionevent.py b/src/mistralai/client/models/completionevent.py new file mode 100644 index 00000000..52db911e --- /dev/null +++ b/src/mistralai/client/models/completionevent.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk diff --git a/src/mistralai/client/models/completionftmodelout.py b/src/mistralai/client/models/completionftmodelout.py new file mode 100644 index 00000000..ccecbb6a --- /dev/null +++ b/src/mistralai/client/models/completionftmodelout.py @@ -0,0 +1,110 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +CompletionFTModelOutObject = Literal["model",] + + +ModelType = Literal["completion",] + + +class CompletionFTModelOutTypedDict(TypedDict): + id: str + created: int + owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool + capabilities: FTModelCapabilitiesOutTypedDict + job: str + object: NotRequired[CompletionFTModelOutObject] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: NotRequired[ModelType] + + +class CompletionFTModelOut(BaseModel): + id: str + + created: int + + owned_by: str + + workspace_id: str + + root: str + + root_version: str + + archived: bool + + capabilities: FTModelCapabilitiesOut + + job: str + + object: Optional[CompletionFTModelOutObject] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Optional[ModelType] = "completion" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "name", + "description", + "max_context_length", + "aliases", + "model_type", + ] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionjobout.py b/src/mistralai/client/models/completionjobout.py new file mode 100644 index 00000000..ecd95bb9 --- /dev/null +++ b/src/mistralai/client/models/completionjobout.py @@ -0,0 +1,184 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +Status = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current status of the fine-tuning job.""" + + +CompletionJobOutObject = Literal["job",] +r"""The object type of the fine-tuning job.""" + + +IntegrationsTypedDict = WandbIntegrationOutTypedDict + + +Integrations = WandbIntegrationOut + + +JobType = Literal["completion",] +r"""The type of job (`FT` for fine-tuning).""" + + +RepositoriesTypedDict = GithubRepositoryOutTypedDict + + +Repositories = GithubRepositoryOut + + +class CompletionJobOutTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: Status + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: NotRequired[CompletionJobOutObject] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[Nullable[List[IntegrationsTypedDict]]] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[JobType] + r"""The type of job (`FT` for fine-tuning).""" + repositories: NotRequired[List[RepositoriesTypedDict]] + + +class CompletionJobOut(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: Status + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Optional[CompletionJobOutObject] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[Integrations]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[JobType] = "completion" + r"""The type of job (`FT` for fine-tuning).""" + + repositories: Optional[List[Repositories]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + "repositories", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionresponsestreamchoice.py b/src/mistralai/client/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..1b8d6fac --- /dev/null +++ b/src/mistralai/client/models/completionresponsestreamchoice.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from pydantic import model_serializer +from typing import Literal, Union +from typing_extensions import TypedDict + + +CompletionResponseStreamChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + + delta: DeltaMessage + + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["finish_reason"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completiontrainingparameters.py b/src/mistralai/client/models/completiontrainingparameters.py new file mode 100644 index 00000000..36b285ab --- /dev/null +++ b/src/mistralai/client/models/completiontrainingparameters.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] + + +class CompletionTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completiontrainingparametersin.py b/src/mistralai/client/models/completiontrainingparametersin.py new file mode 100644 index 00000000..d0315d99 --- /dev/null +++ b/src/mistralai/client/models/completiontrainingparametersin.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionTrainingParametersInTypedDict(TypedDict): + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: NotRequired[float] + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: NotRequired[Nullable[float]] + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: NotRequired[Nullable[float]] + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] + + +class CompletionTrainingParametersIn(BaseModel): + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + + learning_rate: Optional[float] = 0.0001 + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + + weight_decay: OptionalNullable[float] = UNSET + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + + warmup_fraction: OptionalNullable[float] = UNSET + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py new file mode 100644 index 00000000..0a25423f --- /dev/null +++ b/src/mistralai/client/models/contentchunk.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .audiochunk import AudioChunk, AudioChunkTypedDict +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + ReferenceChunkTypedDict, + FileChunkTypedDict, + AudioChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ], +) + + +ContentChunk = Annotated[ + Union[ + Annotated[ImageURLChunk, Tag("image_url")], + Annotated[DocumentURLChunk, Tag("document_url")], + Annotated[TextChunk, Tag("text")], + Annotated[ReferenceChunk, Tag("reference")], + Annotated[FileChunk, Tag("file")], + Annotated[ThinkChunk, Tag("thinking")], + Annotated[AudioChunk, Tag("input_audio")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/src/mistralai/client/models/conversationappendrequest.py b/src/mistralai/client/models/conversationappendrequest.py new file mode 100644 index 00000000..867c0a41 --- /dev/null +++ b/src/mistralai/client/models/conversationappendrequest.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendRequestHandoffExecution = Literal[ + "client", + "server", +] + + +class ConversationAppendRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationAppendRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/client/models/conversationappendstreamrequest.py b/src/mistralai/client/models/conversationappendstreamrequest.py new file mode 100644 index 00000000..f51407bf --- /dev/null +++ b/src/mistralai/client/models/conversationappendstreamrequest.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +class ConversationAppendStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationAppendStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py new file mode 100644 index 00000000..308588a1 --- /dev/null +++ b/src/mistralai/client/models/conversationevents.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict +from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventTypedDict, +) +from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict +from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict +from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict +from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict +from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict +from .ssetypes import SSETypes +from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventTypedDict, +) +from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventTypedDict, +) +from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ConversationEventsDataTypedDict = TypeAliasType( + "ConversationEventsDataTypedDict", + Union[ + ResponseStartedEventTypedDict, + ResponseDoneEventTypedDict, + ResponseErrorEventTypedDict, + ToolExecutionStartedEventTypedDict, + ToolExecutionDeltaEventTypedDict, + ToolExecutionDoneEventTypedDict, + AgentHandoffStartedEventTypedDict, + AgentHandoffDoneEventTypedDict, + FunctionCallEventTypedDict, + MessageOutputEventTypedDict, + ], +) + + +ConversationEventsData = Annotated[ + Union[ + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], + Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], + Annotated[ResponseDoneEvent, Tag("conversation.response.done")], 
+ Annotated[ResponseErrorEvent, Tag("conversation.response.error")], + Annotated[ResponseStartedEvent, Tag("conversation.response.started")], + Annotated[FunctionCallEvent, Tag("function.call.delta")], + Annotated[MessageOutputEvent, Tag("message.output.delta")], + Annotated[ToolExecutionDeltaEvent, Tag("tool.execution.delta")], + Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], + Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ConversationEventsTypedDict(TypedDict): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + data: ConversationEventsDataTypedDict + + +class ConversationEvents(BaseModel): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + + data: ConversationEventsData diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py new file mode 100644 index 00000000..40bd1e72 --- /dev/null +++ b/src/mistralai/client/models/conversationhistory.py @@ -0,0 +1,59 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationHistoryObject = Literal["conversation.history",] + + +EntriesTypedDict = TypeAliasType( + "EntriesTypedDict", + Union[ + FunctionResultEntryTypedDict, + MessageInputEntryTypedDict, + FunctionCallEntryTypedDict, + ToolExecutionEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +Entries = TypeAliasType( + "Entries", + Union[ + FunctionResultEntry, + MessageInputEntry, + FunctionCallEntry, + ToolExecutionEntry, + MessageOutputEntry, + AgentHandoffEntry, + ], +) + + +class ConversationHistoryTypedDict(TypedDict): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + entries: List[EntriesTypedDict] + object: NotRequired[ConversationHistoryObject] + + +class ConversationHistory(BaseModel): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + + entries: List[Entries] + + object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/client/models/conversationinputs.py b/src/mistralai/client/models/conversationinputs.py new file mode 100644 index 00000000..4d30cd76 --- /dev/null +++ b/src/mistralai/client/models/conversationinputs.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .inputentries import InputEntries, InputEntriesTypedDict +from typing import List, Union +from typing_extensions import TypeAliasType + + +ConversationInputsTypedDict = TypeAliasType( + "ConversationInputsTypedDict", Union[str, List[InputEntriesTypedDict]] +) + + +ConversationInputs = TypeAliasType("ConversationInputs", Union[str, List[InputEntries]]) diff --git a/src/mistralai/client/models/conversationmessages.py b/src/mistralai/client/models/conversationmessages.py new file mode 100644 index 00000000..1ea05369 --- /dev/null +++ b/src/mistralai/client/models/conversationmessages.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .messageentries import MessageEntries, MessageEntriesTypedDict +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationMessagesObject = Literal["conversation.messages",] + + +class ConversationMessagesTypedDict(TypedDict): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + messages: List[MessageEntriesTypedDict] + object: NotRequired[ConversationMessagesObject] + + +class ConversationMessages(BaseModel): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + + messages: List[MessageEntries] + + object: Optional[ConversationMessagesObject] = "conversation.messages" diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py new file mode 100644 index 00000000..e3211c4c --- /dev/null +++ b/src/mistralai/client/models/conversationrequest.py @@ -0,0 +1,160 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +HandoffExecution = Literal[ + "client", + "server", +] + + +ToolsTypedDict = TypeAliasType( + "ToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +Tools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +AgentVersionTypedDict = TypeAliasType("AgentVersionTypedDict", Union[str, int]) + + +AgentVersion = TypeAliasType("AgentVersion", Union[str, int]) + + +class ConversationRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] + handoff_execution: NotRequired[Nullable[HandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[List[ToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[AgentVersionTypedDict]] + model: NotRequired[Nullable[str]] + + +class ConversationRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[HandoffExecution] = UNSET + + instructions: OptionalNullable[str] = UNSET + + tools: Optional[List[Tools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + agent_version: OptionalNullable[AgentVersion] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "instructions", + 
"tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + nullable_fields = [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py new file mode 100644 index 00000000..32d0f28f --- /dev/null +++ b/src/mistralai/client/models/conversationresponse.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationResponseObject = Literal["conversation.response",] + + +OutputsTypedDict = TypeAliasType( + "OutputsTypedDict", + Union[ + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +Outputs = TypeAliasType( + "Outputs", + Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], +) + + +class ConversationResponseTypedDict(TypedDict): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + outputs: List[OutputsTypedDict] + usage: ConversationUsageInfoTypedDict + object: NotRequired[ConversationResponseObject] + + +class ConversationResponse(BaseModel): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + + outputs: List[Outputs] + + usage: ConversationUsageInfo + + object: Optional[ConversationResponseObject] = "conversation.response" diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py new file mode 100644 index 00000000..aa2bf7b0 --- /dev/null +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationRestartRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRestartRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartRequestAgentVersion = TypeAliasType( + "ConversationRestartRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputsTypedDict + from_entry_id: str + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[ + Nullable[ConversationRestartRequestAgentVersionTypedDict] + ] + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputs + + from_entry_id: str + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[ConversationRestartRequestAgentVersion] = UNSET + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + nullable_fields = ["metadata", "agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py new file mode 100644 index 00000000..689815eb --- /dev/null +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -0,0 +1,117 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationRestartStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRestartStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartStreamRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartStreamRequestAgentVersion = TypeAliasType( + "ConversationRestartStreamRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartStreamRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputsTypedDict + from_entry_id: str + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[ + Nullable[ConversationRestartStreamRequestAgentVersionTypedDict] + ] + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + +class ConversationRestartStreamRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputs + + from_entry_id: str + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[ConversationRestartStreamRequestAgentVersion] = ( + UNSET + ) + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + nullable_fields = ["metadata", "agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py new file mode 100644 index 00000000..219230a2 --- /dev/null +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -0,0 +1,166 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationStreamRequestToolsTypedDict = TypeAliasType( + "ConversationStreamRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ConversationStreamRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +ConversationStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationStreamRequestAgentVersionTypedDict", Union[str, int] +) + + +ConversationStreamRequestAgentVersion = TypeAliasType( + "ConversationStreamRequestAgentVersion", Union[str, int] +) + + +class ConversationStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] + handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[List[ConversationStreamRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[ConversationStreamRequestAgentVersionTypedDict]] + model: NotRequired[Nullable[str]] + + +class ConversationStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( + UNSET + ) + + instructions: OptionalNullable[str] = UNSET + + tools: Optional[List[ConversationStreamRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + 
metadata: OptionalNullable[Dict[str, Any]] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + agent_version: OptionalNullable[ConversationStreamRequestAgentVersion] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + nullable_fields = [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationusageinfo.py b/src/mistralai/client/models/conversationusageinfo.py new file mode 100644 index 00000000..7a818c89 --- /dev/null +++ b/src/mistralai/client/models/conversationusageinfo.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ConversationUsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + connector_tokens: NotRequired[Nullable[int]] + connectors: NotRequired[Nullable[Dict[str, int]]] + + +class ConversationUsageInfo(BaseModel): + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + connector_tokens: OptionalNullable[int] = UNSET + + connectors: OptionalNullable[Dict[str, int]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "connector_tokens", + "connectors", + ] + nullable_fields = ["connector_tokens", "connectors"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py new file mode 100644 index 00000000..1cd36128 --- /dev/null +++ b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py 
@@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to delete.""" + + +class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to delete.""" diff --git a/src/mistralai/client/models/deletefileout.py b/src/mistralai/client/models/deletefileout.py new file mode 100644 index 00000000..b25538be --- /dev/null +++ b/src/mistralai/client/models/deletefileout.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class DeleteFileOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted file.""" + object: str + r"""The object type that was deleted""" + deleted: bool + r"""The deletion status.""" + + +class DeleteFileOut(BaseModel): + id: str + r"""The ID of the deleted file.""" + + object: str + r"""The object type that was deleted""" + + deleted: bool + r"""The deletion status.""" diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelout.py new file mode 100644 index 00000000..5aa8b68f --- /dev/null +++ b/src/mistralai/client/models/deletemodelout.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class DeleteModelOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted model.""" + object: NotRequired[str] + r"""The object type that was deleted""" + deleted: NotRequired[bool] + r"""The deletion status""" + + +class DeleteModelOut(BaseModel): + id: str + r"""The ID of the deleted model.""" + + object: Optional[str] = "model" + r"""The object type that was deleted""" + + deleted: Optional[bool] = True + r"""The deletion status""" diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py new file mode 100644 index 00000000..0ae56da8 --- /dev/null +++ b/src/mistralai/client/models/deltamessage.py @@ -0,0 +1,67 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ContentTypedDict = TypeAliasType( + "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[ContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + + +class DeltaMessage(BaseModel): + role: OptionalNullable[str] = UNSET + + content: OptionalNullable[Content] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role", "content", "tool_calls"] + nullable_fields = ["role", "content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py new file mode 100644 index 00000000..861a58d3 --- /dev/null +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +DocumentLibraryToolType = Literal["document_library",] + + +class DocumentLibraryToolTypedDict(TypedDict): + library_ids: List[str] + r"""Ids of the library in which to search.""" + type: NotRequired[DocumentLibraryToolType] + + +class DocumentLibraryTool(BaseModel): + library_ids: List[str] + r"""Ids of the library in which to search.""" + + type: Optional[DocumentLibraryToolType] = "document_library" diff --git a/src/mistralai/client/models/documentout.py b/src/mistralai/client/models/documentout.py new file mode 100644 index 00000000..39d0aa2a --- /dev/null +++ b/src/mistralai/client/models/documentout.py @@ -0,0 +1,127 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class DocumentOutTypedDict(TypedDict): + id: str + library_id: str + hash: Nullable[str] + mime_type: Nullable[str] + extension: Nullable[str] + size: Nullable[int] + name: str + created_at: datetime + processing_status: str + uploaded_by_id: Nullable[str] + uploaded_by_type: str + tokens_processing_total: int + summary: NotRequired[Nullable[str]] + last_processed_at: NotRequired[Nullable[datetime]] + number_of_pages: NotRequired[Nullable[int]] + tokens_processing_main_content: NotRequired[Nullable[int]] + tokens_processing_summary: NotRequired[Nullable[int]] + url: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, Any]]] + + +class DocumentOut(BaseModel): + id: str + + library_id: str + + hash: Nullable[str] + + mime_type: Nullable[str] + + extension: Nullable[str] + + size: Nullable[int] + + name: str + + created_at: datetime + + processing_status: str + + uploaded_by_id: Nullable[str] + + uploaded_by_type: str + + tokens_processing_total: int + + summary: OptionalNullable[str] = UNSET + + last_processed_at: OptionalNullable[datetime] = UNSET + + number_of_pages: OptionalNullable[int] = UNSET + + tokens_processing_main_content: OptionalNullable[int] = UNSET + + tokens_processing_summary: OptionalNullable[int] = UNSET + + url: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "summary", + "last_processed_at", + "number_of_pages", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + nullable_fields = [ + "hash", + "mime_type", + "extension", + "size", + "summary", + "last_processed_at", + "number_of_pages", + "uploaded_by_id", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/documenttextcontent.py b/src/mistralai/client/models/documenttextcontent.py new file mode 100644 index 00000000..b1c1aa07 --- /dev/null +++ b/src/mistralai/client/models/documenttextcontent.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class DocumentTextContentTypedDict(TypedDict): + text: str + + +class DocumentTextContent(BaseModel): + text: str diff --git a/src/mistralai/client/models/documentupdatein.py b/src/mistralai/client/models/documentupdatein.py new file mode 100644 index 00000000..02022b89 --- /dev/null +++ b/src/mistralai/client/models/documentupdatein.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AttributesTypedDict = TypeAliasType( + "AttributesTypedDict", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) + + +Attributes = TypeAliasType( + "Attributes", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) + + +class DocumentUpdateInTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] + + +class DocumentUpdateIn(BaseModel): + name: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Attributes]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "attributes"] + nullable_fields = ["name", "attributes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/documenturlchunk.py b/src/mistralai/client/models/documenturlchunk.py new file mode 100644 index 00000000..00eb5535 --- /dev/null +++ b/src/mistralai/client/models/documenturlchunk.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +DocumentURLChunkType = Literal["document_url",] + + +class DocumentURLChunkTypedDict(TypedDict): + document_url: str + document_name: NotRequired[Nullable[str]] + r"""The filename of the document""" + type: NotRequired[DocumentURLChunkType] + + +class DocumentURLChunk(BaseModel): + document_url: str + + document_name: OptionalNullable[str] = UNSET + r"""The filename of the document""" + + type: Optional[DocumentURLChunkType] = "document_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["document_name", "type"] + nullable_fields = ["document_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/embeddingdtype.py b/src/mistralai/client/models/embeddingdtype.py new file mode 100644 index 00000000..26eee779 --- /dev/null +++ b/src/mistralai/client/models/embeddingdtype.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +EmbeddingDtype = Literal[ + "float", + "int8", + "uint8", + "binary", + "ubinary", +] diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py new file mode 100644 index 00000000..1dfe97c8 --- /dev/null +++ b/src/mistralai/client/models/embeddingrequest.py @@ -0,0 +1,90 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .embeddingdtype import EmbeddingDtype +from .encodingformat import EncodingFormat +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +EmbeddingRequestInputsTypedDict = TypeAliasType( + "EmbeddingRequestInputsTypedDict", Union[str, List[str]] +) +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + +EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + +class EmbeddingRequestTypedDict(TypedDict): + model: str + r"""The ID of the model to be used for embedding.""" + inputs: EmbeddingRequestInputsTypedDict + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + output_dimension: NotRequired[Nullable[int]] + r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" + output_dtype: NotRequired[EmbeddingDtype] + encoding_format: NotRequired[EncodingFormat] + + +class EmbeddingRequest(BaseModel): + model: str + r"""The ID of the model to be used for embedding.""" + + inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + output_dimension: OptionalNullable[int] = UNSET + r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" + + output_dtype: Optional[EmbeddingDtype] = None + + encoding_format: Optional[EncodingFormat] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "metadata", + "output_dimension", + "output_dtype", + "encoding_format", + ] + nullable_fields = ["metadata", "output_dimension"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/embeddingresponse.py b/src/mistralai/client/models/embeddingresponse.py new file mode 100644 index 00000000..64a28ea9 --- /dev/null +++ b/src/mistralai/client/models/embeddingresponse.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class EmbeddingResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + data: List[EmbeddingResponseDataTypedDict] + + +class EmbeddingResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + data: List[EmbeddingResponseData] diff --git a/src/mistralai/client/models/embeddingresponsedata.py b/src/mistralai/client/models/embeddingresponsedata.py new file mode 100644 index 00000000..ebd0bf7b --- /dev/null +++ b/src/mistralai/client/models/embeddingresponsedata.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class EmbeddingResponseDataTypedDict(TypedDict): + object: NotRequired[str] + embedding: NotRequired[List[float]] + index: NotRequired[int] + + +class EmbeddingResponseData(BaseModel): + object: Optional[str] = None + + embedding: Optional[List[float]] = None + + index: Optional[int] = None diff --git a/src/mistralai/client/models/encodingformat.py b/src/mistralai/client/models/encodingformat.py new file mode 100644 index 00000000..be6c1a14 --- /dev/null +++ b/src/mistralai/client/models/encodingformat.py @@ -0,0 +1,10 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +EncodingFormat = Literal[ + "float", + "base64", +] diff --git a/src/mistralai/client/models/entitytype.py b/src/mistralai/client/models/entitytype.py new file mode 100644 index 00000000..9c16f4a1 --- /dev/null +++ b/src/mistralai/client/models/entitytype.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +EntityType = Union[ + Literal[ + "User", + "Workspace", + "Org", + ], + UnrecognizedStr, +] +r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/client/models/eventout.py b/src/mistralai/client/models/eventout.py new file mode 100644 index 00000000..5e118d45 --- /dev/null +++ b/src/mistralai/client/models/eventout.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class EventOutTypedDict(TypedDict): + name: str + r"""The name of the event.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + data: NotRequired[Nullable[Dict[str, Any]]] + + +class EventOut(BaseModel): + name: str + r"""The name of the event.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + data: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["data"] + nullable_fields = ["data"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/file.py b/src/mistralai/client/models/file.py new file mode 100644 index 00000000..a8bbc6fa --- /dev/null +++ b/src/mistralai/client/models/file.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +import io +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +import pydantic +from typing import IO, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileTypedDict(TypedDict): + file_name: str + content: Union[bytes, IO[bytes], io.BufferedReader] + content_type: NotRequired[str] + + +class File(BaseModel): + file_name: Annotated[ + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) + ] + + content: Annotated[ + Union[bytes, IO[bytes], io.BufferedReader], + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(content=True)), + ] + + content_type: Annotated[ + Optional[str], + pydantic.Field(alias="Content-Type"), + FieldMetadata(multipart=True), + ] = None diff --git a/src/mistralai/client/models/filechunk.py b/src/mistralai/client/models/filechunk.py new file mode 100644 index 00000000..d8b96f69 --- /dev/null +++ b/src/mistralai/client/models/filechunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class FileChunkTypedDict(TypedDict): + file_id: str + type: Literal["file"] + + +class FileChunk(BaseModel): + file_id: str + + TYPE: Annotated[ + Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], + pydantic.Field(alias="type"), + ] = "file" diff --git a/src/mistralai/client/models/filepurpose.py b/src/mistralai/client/models/filepurpose.py new file mode 100644 index 00000000..eef1b089 --- /dev/null +++ b/src/mistralai/client/models/filepurpose.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +FilePurpose = Union[ + Literal[ + "fine-tune", + "batch", + "ocr", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/files_api_routes_delete_fileop.py b/src/mistralai/client/models/files_api_routes_delete_fileop.py new file mode 100644 index 00000000..b7174866 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_delete_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDeleteFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_download_fileop.py b/src/mistralai/client/models/files_api_routes_download_fileop.py new file mode 100644 index 00000000..fa9e491a --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_download_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDownloadFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_get_signed_urlop.py b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py new file mode 100644 index 00000000..a05f8262 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): + file_id: str + expiry: NotRequired[int] + r"""Number of hours before the url becomes invalid. Defaults to 24h""" + + +class FilesAPIRoutesGetSignedURLRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + expiry: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 24 + r"""Number of hours before the url becomes invalid. Defaults to 24h""" diff --git a/src/mistralai/client/models/files_api_routes_list_filesop.py b/src/mistralai/client/models/files_api_routes_list_filesop.py new file mode 100644 index 00000000..ace99631 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_list_filesop.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + include_total: NotRequired[bool] + sample_type: NotRequired[Nullable[List[SampleType]]] + source: NotRequired[Nullable[List[Source]]] + search: NotRequired[Nullable[str]] + purpose: NotRequired[Nullable[FilePurpose]] + mimetypes: NotRequired[Nullable[List[str]]] + + +class FilesAPIRoutesListFilesRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + include_total: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = True + + sample_type: Annotated[ + OptionalNullable[List[SampleType]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + source: Annotated[ + OptionalNullable[List[Source]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + purpose: Annotated[ + OptionalNullable[FilePurpose], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + mimetypes: Annotated[ + OptionalNullable[List[str]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "include_total", + "sample_type", + "source", + "search", + "purpose", + "mimetypes", + ] + nullable_fields = ["sample_type", "source", "search", "purpose", "mimetypes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, 
f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/files_api_routes_retrieve_fileop.py b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py new file mode 100644 index 00000000..4a9678e5 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesRetrieveFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py new file mode 100644 index 00000000..723c6cc2 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from .filepurpose import FilePurpose +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + purpose: NotRequired[FilePurpose] + + +class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py new file mode 100644 index 00000000..9ecde454 --- /dev/null +++ b/src/mistralai/client/models/fileschema.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
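The list-files request above now carries a `mimetypes` filter alongside the existing paging and purpose filters. Through the client it should look roughly like this, assuming the top-level `Mistral` client is unchanged by this patch (api key and filter values are placeholders):

```python
from mistralai import Mistral

with Mistral(api_key="...") as client:
    files = client.files.list(page=0, page_size=50, mimetypes=["application/json"])
```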
DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileSchemaTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class FileSchema(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/filesignedurl.py b/src/mistralai/client/models/filesignedurl.py new file mode 100644 index 00000000..cbca9847 --- /dev/null +++ b/src/mistralai/client/models/filesignedurl.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FileSignedURLTypedDict(TypedDict): + url: str + + +class FileSignedURL(BaseModel): + url: str diff --git a/src/mistralai/client/models/fimcompletionrequest.py b/src/mistralai/client/models/fimcompletionrequest.py new file mode 100644 index 00000000..c9eca0af --- /dev/null +++ b/src/mistralai/client/models/fimcompletionrequest.py @@ -0,0 +1,130 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionRequestStopTypedDict = TypeAliasType( + "FIMCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = TypeAliasType( + "FIMCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/fimcompletionresponse.py b/src/mistralai/client/models/fimcompletionresponse.py new file mode 100644 index 00000000..8a2eda0c --- /dev/null +++ b/src/mistralai/client/models/fimcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
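For orientation, a fill-in-the-middle request built from the model above: the completion is generated between `prompt` and `suffix`. A sketch only; the model name is illustrative:

```python
from mistralai import Mistral

with Mistral(api_key="...") as client:
    res = client.fim.complete(
        model="codestral-latest",                 # any FIM-capable model
        prompt="def fibonacci(n: int) -> int:\n",
        suffix="\nprint(fibonacci(10))",
        max_tokens=64,
    )
    print(res.choices[0].message.content)
```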
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class FIMCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/src/mistralai/client/models/fimcompletionstreamrequest.py b/src/mistralai/client/models/fimcompletionstreamrequest.py new file mode 100644 index 00000000..29543802 --- /dev/null +++ b/src/mistralai/client/models/fimcompletionstreamrequest.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionStreamRequestStopTypedDict = TypeAliasType( + "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = TypeAliasType( + "FIMCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/finetuneablemodeltype.py b/src/mistralai/client/models/finetuneablemodeltype.py new file mode 100644 index 00000000..f5b8b2ed --- /dev/null +++ b/src/mistralai/client/models/finetuneablemodeltype.py @@ -0,0 +1,10 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FineTuneableModelType = Literal[ + "completion", + "classifier", +] diff --git a/src/mistralai/client/models/ftclassifierlossfunction.py b/src/mistralai/client/models/ftclassifierlossfunction.py new file mode 100644 index 00000000..c4ef66e0 --- /dev/null +++ b/src/mistralai/client/models/ftclassifierlossfunction.py @@ -0,0 +1,10 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FTClassifierLossFunction = Literal[ + "single_class", + "multi_class", +] diff --git a/src/mistralai/client/models/ftmodelcapabilitiesout.py b/src/mistralai/client/models/ftmodelcapabilitiesout.py new file mode 100644 index 00000000..be31aa3c --- /dev/null +++ b/src/mistralai/client/models/ftmodelcapabilitiesout.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class FTModelCapabilitiesOutTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + classification: NotRequired[bool] + + +class FTModelCapabilitiesOut(BaseModel): + completion_chat: Optional[bool] = True + + completion_fim: Optional[bool] = False + + function_calling: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + classification: Optional[bool] = False diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py new file mode 100644 index 00000000..36cb723d --- /dev/null +++ b/src/mistralai/client/models/ftmodelcard.py @@ -0,0 +1,132 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +FTModelCardType = Literal["fine-tuned",] + + +class FTModelCardTypedDict(TypedDict): + r"""Extra fields for fine-tuned models.""" + + id: str + capabilities: ModelCapabilitiesTypedDict + job: str + root: str + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] + default_model_temperature: NotRequired[Nullable[float]] + type: FTModelCardType + archived: NotRequired[bool] + + +class FTModelCard(BaseModel): + r"""Extra fields for fine-tuned models.""" + + id: str + + capabilities: ModelCapabilities + + job: str + + root: str + + object: Optional[str] = "model" + + created: Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + deprecation_replacement_model: OptionalNullable[str] = UNSET + + default_model_temperature: OptionalNullable[float] = UNSET + + TYPE: Annotated[ + Annotated[ + Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) + ], + pydantic.Field(alias="type"), + ] = "fine-tuned" + + archived: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + "type", + "archived", + ] + nullable_fields = [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/function.py b/src/mistralai/client/models/function.py new file mode 100644 index 00000000..6e2b52ed --- /dev/null +++ b/src/mistralai/client/models/function.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + strict: NotRequired[bool] + + +class Function(BaseModel): + name: str + + parameters: Dict[str, Any] + + description: Optional[str] = None + + strict: Optional[bool] = None diff --git a/src/mistralai/client/models/functioncall.py b/src/mistralai/client/models/functioncall.py new file mode 100644 index 00000000..6cb6f26e --- /dev/null +++ b/src/mistralai/client/models/functioncall.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) + + +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + + arguments: Arguments diff --git a/src/mistralai/client/models/functioncallentry.py b/src/mistralai/client/models/functioncallentry.py new file mode 100644 index 00000000..fce4d387 --- /dev/null +++ b/src/mistralai/client/models/functioncallentry.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionCallEntryObject = Literal["entry",] + + +FunctionCallEntryType = Literal["function.call",] + + +class FunctionCallEntryTypedDict(TypedDict): + tool_call_id: str + name: str + arguments: FunctionCallEntryArgumentsTypedDict + object: NotRequired[FunctionCallEntryObject] + type: NotRequired[FunctionCallEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionCallEntry(BaseModel): + tool_call_id: str + + name: str + + arguments: FunctionCallEntryArguments + + object: Optional[FunctionCallEntryObject] = "entry" + + type: Optional[FunctionCallEntryType] = "function.call" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != 
UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/functioncallentryarguments.py b/src/mistralai/client/models/functioncallentryarguments.py new file mode 100644 index 00000000..ac9e6227 --- /dev/null +++ b/src/mistralai/client/models/functioncallentryarguments.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType + + +FunctionCallEntryArgumentsTypedDict = TypeAliasType( + "FunctionCallEntryArgumentsTypedDict", Union[Dict[str, Any], str] +) + + +FunctionCallEntryArguments = TypeAliasType( + "FunctionCallEntryArguments", Union[Dict[str, Any], str] +) diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py new file mode 100644 index 00000000..4e040585 --- /dev/null +++ b/src/mistralai/client/models/functioncallevent.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionCallEventType = Literal["function.call.delta",] + + +class FunctionCallEventTypedDict(TypedDict): + id: str + name: str + tool_call_id: str + arguments: str + type: NotRequired[FunctionCallEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class FunctionCallEvent(BaseModel): + id: str + + name: str + + tool_call_id: str + + arguments: str + + type: Optional[FunctionCallEventType] = "function.call.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/client/models/functionname.py b/src/mistralai/client/models/functionname.py new file mode 100644 index 00000000..2a05c1de --- /dev/null +++ b/src/mistralai/client/models/functionname.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/src/mistralai/client/models/functionresultentry.py b/src/mistralai/client/models/functionresultentry.py new file mode 100644 index 00000000..a843bf9b --- /dev/null +++ b/src/mistralai/client/models/functionresultentry.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionResultEntryObject = Literal["entry",] + + +FunctionResultEntryType = Literal["function.result",] + + +class FunctionResultEntryTypedDict(TypedDict): + tool_call_id: str + result: str + object: NotRequired[FunctionResultEntryObject] + type: NotRequired[FunctionResultEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionResultEntry(BaseModel): + tool_call_id: str + + result: str + + object: Optional[FunctionResultEntryObject] = "entry" + + type: Optional[FunctionResultEntryType] = "function.result" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py new file mode 100644 index 00000000..74b50d1b --- /dev/null +++ b/src/mistralai/client/models/functiontool.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionToolType = Literal["function",] + + +class FunctionToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[FunctionToolType] + + +class FunctionTool(BaseModel): + function: Function + + type: Optional[FunctionToolType] = "function" diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py new file mode 100644 index 00000000..e56fef9b --- /dev/null +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +GithubRepositoryInType = Literal["github",] + + +class GithubRepositoryInTypedDict(TypedDict): + name: str + owner: str + token: str + type: NotRequired[GithubRepositoryInType] + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryIn(BaseModel): + name: str + + owner: str + + token: str + + type: Optional[GithubRepositoryInType] = "github" + + ref: OptionalNullable[str] = UNSET + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "ref", "weight"] + nullable_fields = ["ref"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/githubrepositoryout.py b/src/mistralai/client/models/githubrepositoryout.py new file mode 100644 index 00000000..e3aa9ebc --- /dev/null +++ b/src/mistralai/client/models/githubrepositoryout.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +GithubRepositoryOutType = Literal["github",] + + +class GithubRepositoryOutTypedDict(TypedDict): + name: str + owner: str + commit_id: str + type: NotRequired[GithubRepositoryOutType] + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryOut(BaseModel): + name: str + + owner: str + + commit_id: str + + type: Optional[GithubRepositoryOutType] = "github" + + ref: OptionalNullable[str] = UNSET + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "ref", "weight"] + nullable_fields = ["ref"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/httpvalidationerror.py b/src/mistralai/client/models/httpvalidationerror.py new file mode 100644 index 00000000..34d9b543 --- /dev/null +++ b/src/mistralai/client/models/httpvalidationerror.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .validationerror import ValidationError +from dataclasses import dataclass, field +import httpx +from mistralai.client.models import MistralError +from mistralai.client.types import BaseModel +from typing import List, Optional + + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[ValidationError]] = None + + +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralError): + data: HTTPValidationErrorData = field(hash=False) + + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py new file mode 100644 index 00000000..e09dba81 --- /dev/null +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ImageGenerationToolType = Literal["image_generation",] + + +class ImageGenerationToolTypedDict(TypedDict): + type: NotRequired[ImageGenerationToolType] + + +class ImageGenerationTool(BaseModel): + type: Optional[ImageGenerationToolType] = "image_generation" diff --git a/src/mistralai/client/models/imageurl.py b/src/mistralai/client/models/imageurl.py new file mode 100644 index 00000000..6e61d1ae --- /dev/null +++ b/src/mistralai/client/models/imageurl.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[str]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["detail"] + nullable_fields = ["detail"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py new file mode 100644 index 00000000..f967a3c8 --- /dev/null +++ b/src/mistralai/client/models/imageurlchunk.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.client.types import BaseModel +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ImageURLChunkImageURLTypedDict = TypeAliasType( + "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) + + +ImageURLChunkType = Literal["image_url",] + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURLTypedDict + type: NotRequired[ImageURLChunkType] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURL + + type: Optional[ImageURLChunkType] = "image_url" diff --git a/src/mistralai/client/models/inputentries.py b/src/mistralai/client/models/inputentries.py new file mode 100644 index 00000000..8ae29837 --- /dev/null +++ b/src/mistralai/client/models/inputentries.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +InputEntriesTypedDict = TypeAliasType( + "InputEntriesTypedDict", + Union[ + FunctionResultEntryTypedDict, + MessageInputEntryTypedDict, + FunctionCallEntryTypedDict, + ToolExecutionEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +InputEntries = TypeAliasType( + "InputEntries", + Union[ + FunctionResultEntry, + MessageInputEntry, + FunctionCallEntry, + ToolExecutionEntry, + MessageOutputEntry, + AgentHandoffEntry, + ], +) diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py new file mode 100644 index 00000000..fb067476 --- /dev/null +++ b/src/mistralai/client/models/inputs.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .instructrequest import InstructRequest, InstructRequestTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestInputsMessagesTypedDict = TypeAliasType( + "InstructRequestInputsMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestInputsMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestInputsTypedDict(TypedDict): + messages: List[InstructRequestInputsMessagesTypedDict] + + +class InstructRequestInputs(BaseModel): + messages: List[InstructRequestInputsMessages] + + +InputsTypedDict = TypeAliasType( + "InputsTypedDict", + Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], +) +r"""Chat to classify""" + + +Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) +r"""Chat to classify""" diff --git a/src/mistralai/client/models/instructrequest.py b/src/mistralai/client/models/instructrequest.py new file mode 100644 index 00000000..1b2f2693 --- /dev/null +++ b/src/mistralai/client/models/instructrequest.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestMessagesTypedDict = TypeAliasType( + "InstructRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestTypedDict(TypedDict): + messages: List[InstructRequestMessagesTypedDict] + + +class InstructRequest(BaseModel): + messages: List[InstructRequestMessages] diff --git a/src/mistralai/client/models/jobin.py b/src/mistralai/client/models/jobin.py new file mode 100644 index 00000000..dc7684fc --- /dev/null +++ b/src/mistralai/client/models/jobin.py @@ -0,0 +1,147 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict +from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, +) +from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, +) +from .finetuneablemodeltype import FineTuneableModelType +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict +from .trainingfile import TrainingFile, TrainingFileTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +JobInIntegrationsTypedDict = WandbIntegrationTypedDict + + +JobInIntegrations = WandbIntegration + + +HyperparametersTypedDict = TypeAliasType( + "HyperparametersTypedDict", + Union[ + ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict + ], +) + + +Hyperparameters = TypeAliasType( + "Hyperparameters", + Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], +) + + +JobInRepositoriesTypedDict = GithubRepositoryInTypedDict + + +JobInRepositories = GithubRepositoryIn + + +class JobInTypedDict(TypedDict): + model: str + r"""The name of the model to fine-tune.""" + hyperparameters: HyperparametersTypedDict + training_files: NotRequired[List[TrainingFileTypedDict]] + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. 
The same data should not be present in both train and validation files.""" + suffix: NotRequired[Nullable[str]] + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] + r"""A list of integrations to enable for your fine-tuning job.""" + auto_start: NotRequired[bool] + r"""This field will be required in a future release.""" + invalid_sample_skip_percentage: NotRequired[float] + job_type: NotRequired[Nullable[FineTuneableModelType]] + repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] + classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] + + +class JobIn(BaseModel): + model: str + r"""The name of the model to fine-tune.""" + + hyperparameters: Hyperparameters + + training_files: Optional[List[TrainingFile]] = None + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + + suffix: OptionalNullable[str] = UNSET + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + + integrations: OptionalNullable[List[JobInIntegrations]] = UNSET + r"""A list of integrations to enable for your fine-tuning job.""" + + auto_start: Optional[bool] = None + r"""This field will be required in a future release.""" + + invalid_sample_skip_percentage: Optional[float] = 0 + + job_type: OptionalNullable[FineTuneableModelType] = UNSET + + repositories: OptionalNullable[List[JobInRepositories]] = UNSET + + classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_files", + "validation_files", + "suffix", + "integrations", + "auto_start", + "invalid_sample_skip_percentage", + "job_type", + "repositories", + "classifier_targets", + ] + nullable_fields = [ + "validation_files", + "suffix", + "integrations", + "job_type", + "repositories", + "classifier_targets", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobmetadataout.py b/src/mistralai/client/models/jobmetadataout.py new file mode 100644 index 00000000..f91e30c0 --- /dev/null +++ b/src/mistralai/client/models/jobmetadataout.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
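A sketch of a minimal completion fine-tuning `JobIn` using the `Hyperparameters` union above. The `learning_rate` and `file_id` field names come from the wider SDK and are assumptions here, since those models are defined outside this hunk:

```python
from mistralai.client.models import (
    CompletionTrainingParametersIn,
    JobIn,
    TrainingFile,
)

job = JobIn(
    model="open-mistral-7b",
    # learning_rate is an assumed field of CompletionTrainingParametersIn.
    hyperparameters=CompletionTrainingParametersIn(learning_rate=1e-4),
    # file_id is an assumed field of TrainingFile; value is a placeholder.
    training_files=[TrainingFile(file_id="00000000-0000-0000-0000-000000000000")],
    suffix="my-great-model",
    auto_start=False,
)
```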
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class JobMetadataOutTypedDict(TypedDict): + expected_duration_seconds: NotRequired[Nullable[int]] + cost: NotRequired[Nullable[float]] + cost_currency: NotRequired[Nullable[str]] + train_tokens_per_step: NotRequired[Nullable[int]] + train_tokens: NotRequired[Nullable[int]] + data_tokens: NotRequired[Nullable[int]] + estimated_start_time: NotRequired[Nullable[int]] + + +class JobMetadataOut(BaseModel): + expected_duration_seconds: OptionalNullable[int] = UNSET + + cost: OptionalNullable[float] = UNSET + + cost_currency: OptionalNullable[str] = UNSET + + train_tokens_per_step: OptionalNullable[int] = UNSET + + train_tokens: OptionalNullable[int] = UNSET + + data_tokens: OptionalNullable[int] = UNSET + + estimated_start_time: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + nullable_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py new file mode 100644 index 00000000..21a04f73 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py new file mode 100644 index 00000000..32e34281 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py @@ -0,0 +1,59 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): + job_id: str + inline: NotRequired[Nullable[bool]] + + +class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + inline: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["inline"] + nullable_fields = ["inline"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py new file mode 100644 index 00000000..3557e773 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -0,0 +1,108 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .batchjobstatus import BatchJobStatus +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + created_after: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + status: NotRequired[Nullable[List[BatchJobStatus]]] + + +class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + agent_id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + + status: Annotated[ + OptionalNullable[List[BatchJobStatus]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "model", + "agent_id", + "metadata", + "created_after", + "created_by_me", + "status", + ] + nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py new file mode 100644 index 00000000..4536b738 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to archive.""" + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to archive.""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py new file mode 100644 index 00000000..b36d3c3e --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -0,0 +1,45 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to cancel.""" + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to cancel.""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py new file mode 100644 index 00000000..ece0d15a --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict +from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict +from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +Response1TypedDict = TypeAliasType( + "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +) + + +Response1 = Annotated[ + Union[ + Annotated[ClassifierJobOut, Tag("classifier")], + Annotated[CompletionJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + Union[LegacyJobMetadataOut, Response1], +) +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py new file mode 100644 index 00000000..aa5a2609 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -0,0 +1,45 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to analyse.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to analyse.""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py new file mode 100644 index 00000000..7e399b31 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -0,0 +1,162 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +QueryParamStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current job state to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""The page number of the results to be returned.""" + page_size: NotRequired[int] + r"""The number of items to return per page.""" + model: NotRequired[Nullable[str]] + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + created_after: NotRequired[Nullable[datetime]] + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_before: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + status: NotRequired[Nullable[QueryParamStatus]] + r"""The current job state to filter on. When set, the other results are not displayed.""" + wandb_project: NotRequired[Nullable[str]] + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + wandb_name: NotRequired[Nullable[str]] + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + suffix: NotRequired[Nullable[str]] + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""The page number of the results to be returned.""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + r"""The number of items to return per page.""" + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + + created_before: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + + status: Annotated[ + OptionalNullable[QueryParamStatus], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The current job state to filter on. 
When set, the other results are not displayed.""" + + wandb_project: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + + wandb_name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + + suffix: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "model", + "created_after", + "created_before", + "created_by_me", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + nullable_fields = [ + "model", + "created_after", + "created_before", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 00000000..ed5938b0 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
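A sketch of a filtered fine-tuning job listing using the request model above; parameters left as `UNSET` are dropped by `serialize_model` and never reach the query string:

```python
from mistralai.client.models import JobsAPIRoutesFineTuningGetFineTuningJobsRequest

req = JobsAPIRoutesFineTuningGetFineTuningJobsRequest(
    page=0,
    page_size=20,
    status="RUNNING",            # one of the QueryParamStatus literals above
    created_by_me=True,
    wandb_project="my-project",  # illustrative W&B project name
)
print(req.model_dump())  # model, suffix, wandb_name, ... are omitted
```
diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 00000000..ed5938b0 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com).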
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + +JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py new file mode 100644 index 00000000..e1be0ac0 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to unarchive.""" + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to unarchive.""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py new file mode 100644 index 00000000..a2b70b37 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict +from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict +from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import ( + FieldMetadata, + PathParamMetadata, + RequestMetadata, + get_discriminator, +) +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to update.""" + update_ft_model_in: UpdateFTModelInTypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to update.""" + + update_ft_model_in: Annotated[ + UpdateFTModelIn, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ + Union[ + Annotated[ClassifierFTModelOut, Tag("classifier")], + Annotated[CompletionFTModelOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobsout.py b/src/mistralai/client/models/jobsout.py new file mode 100644 index 00000000..9087704f --- /dev/null +++ b/src/mistralai/client/models/jobsout.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict +from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +JobsOutDataTypedDict = TypeAliasType( + "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +) + + +JobsOutData = Annotated[ + Union[ + Annotated[ClassifierJobOut, Tag("classifier")], + Annotated[CompletionJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] + + +JobsOutObject = Literal["list",] + + +class JobsOutTypedDict(TypedDict): + total: int + data: NotRequired[List[JobsOutDataTypedDict]] + object: NotRequired[JobsOutObject] + + +class JobsOut(BaseModel): + total: int + + data: Optional[List[JobsOutData]] = None + + object: Optional[JobsOutObject] = "list" diff --git a/src/mistralai/client/models/jsonschema.py b/src/mistralai/client/models/jsonschema.py new file mode 100644 index 00000000..db2fa55b --- /dev/null +++ b/src/mistralai/client/models/jsonschema.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/legacyjobmetadataout.py b/src/mistralai/client/models/legacyjobmetadataout.py new file mode 100644 index 00000000..155ecea7 --- /dev/null +++ b/src/mistralai/client/models/legacyjobmetadataout.py @@ -0,0 +1,125 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +LegacyJobMetadataOutObject = Literal["job.metadata",] + + +class LegacyJobMetadataOutTypedDict(TypedDict): + details: str + expected_duration_seconds: NotRequired[Nullable[int]] + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: NotRequired[Nullable[float]] + r"""The cost of the fine-tuning job.""" + cost_currency: NotRequired[Nullable[str]] + r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: NotRequired[Nullable[int]] + r"""The number of tokens consumed by one training step.""" + train_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens in the training dataset.""" + estimated_start_time: NotRequired[Nullable[int]] + deprecated: NotRequired[bool] + epochs: NotRequired[Nullable[float]] + r"""The number of complete passes through the entire training dataset.""" + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + object: NotRequired[LegacyJobMetadataOutObject] + + +class LegacyJobMetadataOut(BaseModel): + details: str + + expected_duration_seconds: OptionalNullable[int] = UNSET + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + + cost: OptionalNullable[float] = UNSET + r"""The cost of the fine-tuning job.""" + + cost_currency: OptionalNullable[str] = UNSET + r"""The currency used for the fine-tuning job cost.""" + + train_tokens_per_step: OptionalNullable[int] = UNSET + r"""The number of tokens consumed by one training step.""" + + train_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens used during the fine-tuning process.""" + + data_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens in the training dataset.""" + + estimated_start_time: OptionalNullable[int] = UNSET + + deprecated: Optional[bool] = True + + epochs: OptionalNullable[float] = UNSET + r"""The number of complete passes through the entire training dataset.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + + object: Optional[LegacyJobMetadataOutObject] = "job.metadata" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "deprecated", + "epochs", + "training_steps", + "object", + ] + nullable_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "epochs", + "training_steps", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraries_delete_v1op.py b/src/mistralai/client/models/libraries_delete_v1op.py new file mode 100644 index 00000000..fa447de0 --- /dev/null +++ b/src/mistralai/client/models/libraries_delete_v1op.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDeleteV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_delete_v1op.py b/src/mistralai/client/models/libraries_documents_delete_v1op.py new file mode 100644 index 00000000..bc5ec6e5 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_delete_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py new file mode 100644 index 00000000..24ed897d --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py new file mode 100644 index 00000000..350c8e73 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_status_v1op.py b/src/mistralai/client/models/libraries_documents_get_status_v1op.py new file mode 100644 index 00000000..92b077d3 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_status_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetStatusV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py new file mode 100644 index 00000000..68f9725a --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetTextContentV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_v1op.py b/src/mistralai/client/models/libraries_documents_get_v1op.py new file mode 100644 index 00000000..a67e687e --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_list_v1op.py b/src/mistralai/client/models/libraries_documents_list_v1op.py new file mode 100644 index 00000000..5dec3385 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_list_v1op.py @@ -0,0 +1,97 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class LibrariesDocumentsListV1RequestTypedDict(TypedDict): + library_id: str + search: NotRequired[Nullable[str]] + page_size: NotRequired[int] + page: NotRequired[int] + filters_attributes: NotRequired[Nullable[str]] + sort_by: NotRequired[str] + sort_order: NotRequired[str] + + +class LibrariesDocumentsListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + filters_attributes: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sort_by: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "created_at" + + sort_order: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "desc" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "search", + "page_size", + "page", + "filters_attributes", + "sort_by", + "sort_order", + ] + nullable_fields = ["search", "filters_attributes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraries_documents_reprocess_v1op.py b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py new file mode 
100644 index 00000000..8aee7552 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsReprocessV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_update_v1op.py b/src/mistralai/client/models/libraries_documents_update_v1op.py new file mode 100644 index 00000000..f677b4dd --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_update_v1op.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + document_update_in: DocumentUpdateInTypedDict + + +class LibrariesDocumentsUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_update_in: Annotated[ + DocumentUpdateIn, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/libraries_documents_upload_v1op.py b/src/mistralai/client/models/libraries_documents_upload_v1op.py new file mode 100644 index 00000000..e2d59d9f --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_upload_v1op.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import ( + FieldMetadata, + MultipartFormMetadata, + PathParamMetadata, + RequestMetadata, +) +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): + library_id: str + request_body: LibrariesDocumentsUploadV1DocumentUploadTypedDict + + +class LibrariesDocumentsUploadV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + request_body: Annotated[ + LibrariesDocumentsUploadV1DocumentUpload, + FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), + ] diff --git a/src/mistralai/client/models/libraries_get_v1op.py b/src/mistralai/client/models/libraries_get_v1op.py new file mode 100644 index 00000000..83ae377d --- /dev/null +++ b/src/mistralai/client/models/libraries_get_v1op.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesGetV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_share_create_v1op.py b/src/mistralai/client/models/libraries_share_create_v1op.py new file mode 100644 index 00000000..d0313bd0 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_create_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .sharingin import SharingIn, SharingInTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareCreateV1RequestTypedDict(TypedDict): + library_id: str + sharing_in: SharingInTypedDict + + +class LibrariesShareCreateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + sharing_in: Annotated[ + SharingIn, FieldMetadata(request=RequestMetadata(media_type="application/json")) + ] diff --git a/src/mistralai/client/models/libraries_share_delete_v1op.py b/src/mistralai/client/models/libraries_share_delete_v1op.py new file mode 100644 index 00000000..620527d5 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_delete_v1op.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
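
The upload request above mirrors the curl convention quoted in its docstring, where the multipart filename carries the custom name. A hedged sketch, assuming the `File` model (defined in `models/file.py`, outside this hunk) exposes the usual Speakeasy `file_name`/`content` pair:

```python
# Sketch under stated assumptions: File's exact fields are not shown in this diff.
from mistralai.client.models.libraries_documents_upload_v1op import (
    LibrariesDocumentsUploadV1Request,
)

with open("path/to/your/file.jsonl", "rb") as fh:
    req = LibrariesDocumentsUploadV1Request(
        library_id="lib_123",  # hypothetical library id
        request_body={
            "file": {
                "file_name": "custom_name.jsonl",  # plays the role of ;filename=...
                "content": fh.read(),
            },
        },
    )
```
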
DO NOT EDIT.""" + +from __future__ import annotations +from .sharingdelete import SharingDelete, SharingDeleteTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareDeleteV1RequestTypedDict(TypedDict): + library_id: str + sharing_delete: SharingDeleteTypedDict + + +class LibrariesShareDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + sharing_delete: Annotated[ + SharingDelete, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/libraries_share_list_v1op.py b/src/mistralai/client/models/libraries_share_list_v1op.py new file mode 100644 index 00000000..fd5d9d33 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_list_v1op.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareListV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesShareListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_update_v1op.py b/src/mistralai/client/models/libraries_update_v1op.py new file mode 100644 index 00000000..c434ab7a --- /dev/null +++ b/src/mistralai/client/models/libraries_update_v1op.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesUpdateV1RequestTypedDict(TypedDict): + library_id: str + library_in_update: LibraryInUpdateTypedDict + + +class LibrariesUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + library_in_update: Annotated[ + LibraryInUpdate, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/libraryin.py b/src/mistralai/client/models/libraryin.py new file mode 100644 index 00000000..a7b36158 --- /dev/null +++ b/src/mistralai/client/models/libraryin.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryInTypedDict(TypedDict): + name: str + description: NotRequired[Nullable[str]] + chunk_size: NotRequired[Nullable[int]] + + +class LibraryIn(BaseModel): + name: str + + description: OptionalNullable[str] = UNSET + + chunk_size: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "chunk_size"] + nullable_fields = ["description", "chunk_size"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraryinupdate.py b/src/mistralai/client/models/libraryinupdate.py new file mode 100644 index 00000000..f0241ba1 --- /dev/null +++ b/src/mistralai/client/models/libraryinupdate.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryInUpdateTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class LibraryInUpdate(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraryout.py b/src/mistralai/client/models/libraryout.py new file mode 100644 index 00000000..d1953f16 --- /dev/null +++ b/src/mistralai/client/models/libraryout.py @@ -0,0 +1,116 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryOutTypedDict(TypedDict): + id: str + name: str + created_at: datetime + updated_at: datetime + owner_id: Nullable[str] + owner_type: str + total_size: int + nb_documents: int + chunk_size: Nullable[int] + emoji: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + generated_description: NotRequired[Nullable[str]] + explicit_user_members_count: NotRequired[Nullable[int]] + explicit_workspace_members_count: NotRequired[Nullable[int]] + org_sharing_role: NotRequired[Nullable[str]] + generated_name: NotRequired[Nullable[str]] + r"""Generated Name""" + + +class LibraryOut(BaseModel): + id: str + + name: str + + created_at: datetime + + updated_at: datetime + + owner_id: Nullable[str] + + owner_type: str + + total_size: int + + nb_documents: int + + chunk_size: Nullable[int] + + emoji: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + generated_description: OptionalNullable[str] = UNSET + + explicit_user_members_count: OptionalNullable[int] = UNSET + + explicit_workspace_members_count: OptionalNullable[int] = UNSET + + org_sharing_role: OptionalNullable[str] = UNSET + + generated_name: OptionalNullable[str] = UNSET + r"""Generated Name""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + nullable_fields = [ + "owner_id", + "chunk_size", + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listdocumentout.py b/src/mistralai/client/models/listdocumentout.py new file mode 100644 index 00000000..24969a0f --- /dev/null +++ b/src/mistralai/client/models/listdocumentout.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documentout import DocumentOut, DocumentOutTypedDict +from .paginationinfo import PaginationInfo, PaginationInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListDocumentOutTypedDict(TypedDict): + pagination: PaginationInfoTypedDict + data: List[DocumentOutTypedDict] + + +class ListDocumentOut(BaseModel): + pagination: PaginationInfo + + data: List[DocumentOut] diff --git a/src/mistralai/client/models/listfilesout.py b/src/mistralai/client/models/listfilesout.py new file mode 100644 index 00000000..1db17c40 --- /dev/null +++ b/src/mistralai/client/models/listfilesout.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .fileschema import FileSchema, FileSchemaTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ListFilesOutTypedDict(TypedDict): + data: List[FileSchemaTypedDict] + object: str + total: NotRequired[Nullable[int]] + + +class ListFilesOut(BaseModel): + data: List[FileSchema] + + object: str + + total: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["total"] + nullable_fields = ["total"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listlibraryout.py b/src/mistralai/client/models/listlibraryout.py new file mode 100644 index 00000000..24aaa1a9 --- /dev/null +++ b/src/mistralai/client/models/listlibraryout.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .libraryout import LibraryOut, LibraryOutTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListLibraryOutTypedDict(TypedDict): + data: List[LibraryOutTypedDict] + + +class ListLibraryOut(BaseModel): + data: List[LibraryOut] diff --git a/src/mistralai/client/models/listsharingout.py b/src/mistralai/client/models/listsharingout.py new file mode 100644 index 00000000..f139813f --- /dev/null +++ b/src/mistralai/client/models/listsharingout.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .sharingout import SharingOut, SharingOutTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListSharingOutTypedDict(TypedDict): + data: List[SharingOutTypedDict] + + +class ListSharingOut(BaseModel): + data: List[SharingOut] diff --git a/src/mistralai/client/models/messageentries.py b/src/mistralai/client/models/messageentries.py new file mode 100644 index 00000000..9b1706de --- /dev/null +++ b/src/mistralai/client/models/messageentries.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageEntriesTypedDict = TypeAliasType( + "MessageEntriesTypedDict", + Union[MessageInputEntryTypedDict, MessageOutputEntryTypedDict], +) + + +MessageEntries = TypeAliasType( + "MessageEntries", Union[MessageInputEntry, MessageOutputEntry] +) diff --git a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py new file mode 100644 index 00000000..e90d8aa0 --- /dev/null +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageInputContentChunksTypedDict = TypeAliasType( + "MessageInputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ToolFileChunkTypedDict, + ], +) + + +MessageInputContentChunks = TypeAliasType( + "MessageInputContentChunks", + Union[TextChunk, ImageURLChunk, DocumentURLChunk, ThinkChunk, ToolFileChunk], +) diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py new file mode 100644 index 00000000..12a31097 --- /dev/null +++ b/src/mistralai/client/models/messageinputentry.py @@ -0,0 +1,111 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +Object = Literal["entry",] + + +MessageInputEntryType = Literal["message.input",] + + +MessageInputEntryRole = Literal[ + "assistant", + "user", +] + + +MessageInputEntryContentTypedDict = TypeAliasType( + "MessageInputEntryContentTypedDict", + Union[str, List[MessageInputContentChunksTypedDict]], +) + + +MessageInputEntryContent = TypeAliasType( + "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] +) + + +class MessageInputEntryTypedDict(TypedDict): + r"""Representation of an input message inside the conversation.""" + + role: MessageInputEntryRole + content: MessageInputEntryContentTypedDict + object: NotRequired[Object] + type: NotRequired[MessageInputEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + prefix: NotRequired[bool] + + +class MessageInputEntry(BaseModel): + r"""Representation of an input message inside the conversation.""" + + role: MessageInputEntryRole + + content: MessageInputEntryContent + + object: Optional[Object] = "entry" + + type: Optional[MessageInputEntryType] = "message.input" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + prefix: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "type", + "created_at", + "completed_at", + "id", + "prefix", + ] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py new file mode 100644 index 00000000..136a7608 --- /dev/null +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageOutputContentChunksTypedDict = TypeAliasType( + "MessageOutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +MessageOutputContentChunks = TypeAliasType( + "MessageOutputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ThinkChunk, + ToolFileChunk, + ToolReferenceChunk, + ], +) diff --git a/src/mistralai/client/models/messageoutputentry.py b/src/mistralai/client/models/messageoutputentry.py new file mode 100644 index 00000000..d52e4e3e --- /dev/null +++ b/src/mistralai/client/models/messageoutputentry.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +MessageOutputEntryObject = Literal["entry",] + + +MessageOutputEntryType = Literal["message.output",] + + +MessageOutputEntryRole = Literal["assistant",] + + +MessageOutputEntryContentTypedDict = TypeAliasType( + "MessageOutputEntryContentTypedDict", + Union[str, List[MessageOutputContentChunksTypedDict]], +) + + +MessageOutputEntryContent = TypeAliasType( + "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] +) + + +class MessageOutputEntryTypedDict(TypedDict): + content: MessageOutputEntryContentTypedDict + object: NotRequired[MessageOutputEntryObject] + type: NotRequired[MessageOutputEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + role: NotRequired[MessageOutputEntryRole] + + +class MessageOutputEntry(BaseModel): + content: MessageOutputEntryContent + + object: Optional[MessageOutputEntryObject] = "entry" + + type: Optional[MessageOutputEntryType] = "message.output" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + role: Optional[MessageOutputEntryRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "type", + "created_at", + "completed_at", + "id", + "agent_id", + "model", + "role", + ] + nullable_fields = ["completed_at", "agent_id", "model"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py new file mode 100644 index 00000000..3db7f5a0 --- /dev/null +++ b/src/mistralai/client/models/messageoutputevent.py @@ -0,0 +1,101 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +MessageOutputEventType = Literal["message.output.delta",] + + +MessageOutputEventRole = Literal["assistant",] + + +MessageOutputEventContentTypedDict = TypeAliasType( + "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] +) + + +MessageOutputEventContent = TypeAliasType( + "MessageOutputEventContent", Union[str, OutputContentChunks] +) + + +class MessageOutputEventTypedDict(TypedDict): + id: str + content: MessageOutputEventContentTypedDict + type: NotRequired[MessageOutputEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + content_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + role: NotRequired[MessageOutputEventRole] + + +class MessageOutputEvent(BaseModel): + id: str + + content: MessageOutputEventContent + + type: Optional[MessageOutputEventType] = "message.output.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + content_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + role: Optional[MessageOutputEventRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "type", + "created_at", + "output_index", + "content_index", + "model", + "agent_id", + "role", + ] + nullable_fields = ["model", "agent_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/metricout.py b/src/mistralai/client/models/metricout.py new file mode 100644 index 00000000..f8027a69 --- /dev/null +++ b/src/mistralai/client/models/metricout.py @@ -0,0 +1,60 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class MetricOutTypedDict(TypedDict): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: NotRequired[Nullable[float]] + valid_loss: NotRequired[Nullable[float]] + valid_mean_token_accuracy: NotRequired[Nullable[float]] + + +class MetricOut(BaseModel): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: OptionalNullable[float] = UNSET + + valid_loss: OptionalNullable[float] = UNSET + + valid_mean_token_accuracy: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] + nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/mistralerror.py b/src/mistralai/client/models/mistralerror.py new file mode 100644 index 00000000..28cfd22d --- /dev/null +++ b/src/mistralai/client/models/mistralerror.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass, field + + +@dataclass(unsafe_hash=True) +class MistralError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) + + def __str__(self): + return self.message diff --git a/src/mistralai/client/models/mistralpromptmode.py b/src/mistralai/client/models/mistralpromptmode.py new file mode 100644 index 00000000..7008fc05 --- /dev/null +++ b/src/mistralai/client/models/mistralpromptmode.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. +""" diff --git a/src/mistralai/client/models/modelcapabilities.py b/src/mistralai/client/models/modelcapabilities.py new file mode 100644 index 00000000..a6db80e7 --- /dev/null +++ b/src/mistralai/client/models/modelcapabilities.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + function_calling: NotRequired[bool] + completion_fim: NotRequired[bool] + fine_tuning: NotRequired[bool] + vision: NotRequired[bool] + ocr: NotRequired[bool] + classification: NotRequired[bool] + moderation: NotRequired[bool] + audio: NotRequired[bool] + audio_transcription: NotRequired[bool] + + +class ModelCapabilities(BaseModel): + completion_chat: Optional[bool] = False + + function_calling: Optional[bool] = False + + completion_fim: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + vision: Optional[bool] = False + + ocr: Optional[bool] = False + + classification: Optional[bool] = False + + moderation: Optional[bool] = False + + audio: Optional[bool] = False + + audio_transcription: Optional[bool] = False diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py new file mode 100644 index 00000000..574f053d --- /dev/null +++ b/src/mistralai/client/models/modelconversation.py @@ -0,0 +1,139 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ModelConversationToolsTypedDict = TypeAliasType( + "ModelConversationToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ModelConversationTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +ModelConversationObject = Literal["conversation",] + + +class ModelConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + model: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[ModelConversationToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + object: NotRequired[ModelConversationObject] + + +class ModelConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + model: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[ModelConversationTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + object: Optional[ModelConversationObject] = "conversation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + 
"tools", + "completion_args", + "name", + "description", + "metadata", + "object", + ] + nullable_fields = ["instructions", "name", "description", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py new file mode 100644 index 00000000..6a5209fa --- /dev/null +++ b/src/mistralai/client/models/modellist.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +DataTypedDict = TypeAliasType( + "DataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] +) + + +Data = Annotated[ + Union[ + Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ModelListTypedDict(TypedDict): + object: NotRequired[str] + data: NotRequired[List[DataTypedDict]] + + +class ModelList(BaseModel): + object: Optional[str] = "list" + + data: Optional[List[Data]] = None diff --git a/src/mistralai/client/models/moderationobject.py b/src/mistralai/client/models/moderationobject.py new file mode 100644 index 00000000..a6b44b96 --- /dev/null +++ b/src/mistralai/client/models/moderationobject.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ModerationObjectTypedDict(TypedDict): + categories: NotRequired[Dict[str, bool]] + r"""Moderation result thresholds""" + category_scores: NotRequired[Dict[str, float]] + r"""Moderation result""" + + +class ModerationObject(BaseModel): + categories: Optional[Dict[str, bool]] = None + r"""Moderation result thresholds""" + + category_scores: Optional[Dict[str, float]] = None + r"""Moderation result""" diff --git a/src/mistralai/client/models/moderationresponse.py b/src/mistralai/client/models/moderationresponse.py new file mode 100644 index 00000000..288c8d82 --- /dev/null +++ b/src/mistralai/client/models/moderationresponse.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .moderationobject import ModerationObject, ModerationObjectTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ModerationResponseTypedDict(TypedDict): + id: str + model: str + results: List[ModerationObjectTypedDict] + + +class ModerationResponse(BaseModel): + id: str + + model: str + + results: List[ModerationObject] diff --git a/src/mistralai/client/models/no_response_error.py b/src/mistralai/client/models/no_response_error.py new file mode 100644 index 00000000..1deab64b --- /dev/null +++ b/src/mistralai/client/models/no_response_error.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + object.__setattr__(self, "message", message) + super().__init__(message) + + def __str__(self): + return self.message diff --git a/src/mistralai/client/models/ocrimageobject.py b/src/mistralai/client/models/ocrimageobject.py new file mode 100644 index 00000000..e97fa8df --- /dev/null +++ b/src/mistralai/client/models/ocrimageobject.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRImageObjectTypedDict(TypedDict): + id: str + r"""Image ID for extracted image in a page""" + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + image_base64: NotRequired[Nullable[str]] + r"""Base64 string of the extracted image""" + image_annotation: NotRequired[Nullable[str]] + r"""Annotation of the extracted image in json str""" + + +class OCRImageObject(BaseModel): + id: str + r"""Image ID for extracted image in a page""" + + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + + image_base64: OptionalNullable[str] = UNSET + r"""Base64 string of the extracted image""" + + image_annotation: OptionalNullable[str] = UNSET + r"""Annotation of the extracted image in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["image_base64", "image_annotation"] + nullable_fields = [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k 
= f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrpagedimensions.py b/src/mistralai/client/models/ocrpagedimensions.py new file mode 100644 index 00000000..f4fc11e0 --- /dev/null +++ b/src/mistralai/client/models/ocrpagedimensions.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class OCRPageDimensionsTypedDict(TypedDict): + dpi: int + r"""Dots per inch of the page-image""" + height: int + r"""Height of the image in pixels""" + width: int + r"""Width of the image in pixels""" + + +class OCRPageDimensions(BaseModel): + dpi: int + r"""Dots per inch of the page-image""" + + height: int + r"""Height of the image in pixels""" + + width: int + r"""Width of the image in pixels""" diff --git a/src/mistralai/client/models/ocrpageobject.py b/src/mistralai/client/models/ocrpageobject.py new file mode 100644 index 00000000..f8b43601 --- /dev/null +++ b/src/mistralai/client/models/ocrpageobject.py @@ -0,0 +1,91 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class OCRPageObjectTypedDict(TypedDict): + index: int + r"""The page index in a pdf document starting from 0""" + markdown: str + r"""The markdown string response of the page""" + images: List[OCRImageObjectTypedDict] + r"""List of all extracted images in the page""" + dimensions: Nullable[OCRPageDimensionsTypedDict] + r"""The dimensions of the PDF Page's screenshot image""" + tables: NotRequired[List[OCRTableObjectTypedDict]] + r"""List of all extracted tables in the page""" + hyperlinks: NotRequired[List[str]] + r"""List of all hyperlinks in the page""" + header: NotRequired[Nullable[str]] + r"""Header of the page""" + footer: NotRequired[Nullable[str]] + r"""Footer of the page""" + + +class OCRPageObject(BaseModel): + index: int + r"""The page index in a pdf document starting from 0""" + + markdown: str + r"""The markdown string response of the page""" + + images: List[OCRImageObject] + r"""List of all extracted images in the page""" + + dimensions: Nullable[OCRPageDimensions] + r"""The dimensions of the PDF Page's screenshot image""" + + tables: Optional[List[OCRTableObject]] = None + r"""List of all extracted tables in the page""" + + hyperlinks: Optional[List[str]] = None + r"""List of all hyperlinks in the page""" + + header: OptionalNullable[str] = UNSET + r"""Header of the page""" + + footer: OptionalNullable[str] = UNSET + 
r"""Footer of the page""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["tables", "hyperlinks", "header", "footer"] + nullable_fields = ["header", "footer", "dimensions"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrrequest.py b/src/mistralai/client/models/ocrrequest.py new file mode 100644 index 00000000..03a6028c --- /dev/null +++ b/src/mistralai/client/models/ocrrequest.py @@ -0,0 +1,146 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DocumentTypedDict = TypeAliasType( + "DocumentTypedDict", + Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], +) +r"""Document to run OCR on""" + + +Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) +r"""Document to run OCR on""" + + +TableFormat = Literal[ + "markdown", + "html", +] + + +class OCRRequestTypedDict(TypedDict): + model: Nullable[str] + document: DocumentTypedDict + r"""Document to run OCR on""" + id: NotRequired[str] + pages: NotRequired[Nullable[List[int]]] + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + include_image_base64: NotRequired[Nullable[bool]] + r"""Include image URLs in response""" + image_limit: NotRequired[Nullable[int]] + r"""Max images to extract""" + image_min_size: NotRequired[Nullable[int]] + r"""Minimum height and width of image to extract""" + bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + document_annotation_prompt: NotRequired[Nullable[str]] + r"""Optional prompt to guide the model in extracting structured output from the entire document. 
A document_annotation_format must be provided.""" + table_format: NotRequired[Nullable[TableFormat]] + extract_header: NotRequired[bool] + extract_footer: NotRequired[bool] + + +class OCRRequest(BaseModel): + model: Nullable[str] + + document: Document + r"""Document to run OCR on""" + + id: Optional[str] = None + + pages: OptionalNullable[List[int]] = UNSET + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + + include_image_base64: OptionalNullable[bool] = UNSET + r"""Include image URLs in response""" + + image_limit: OptionalNullable[int] = UNSET + r"""Max images to extract""" + + image_min_size: OptionalNullable[int] = UNSET + r"""Minimum height and width of image to extract""" + + bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + + document_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + + document_annotation_prompt: OptionalNullable[str] = UNSET + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" + + table_format: OptionalNullable[TableFormat] = UNSET + + extract_header: Optional[bool] = None + + extract_footer: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + nullable_fields = [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py new file mode 100644 index 00000000..2813a1ca --- /dev/null +++ b/src/mistralai/client/models/ocrresponse.py @@ -0,0 +1,68 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
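Taken together, `OCRRequest` and the page models above describe the OCR request surface. A minimal usage sketch follows; it assumes the top-level `Mistral` client survives this restructuring and still exposes an `ocr.process()` method that accepts these request fields as keyword arguments (assumptions, not confirmed by this patch):

```python
# Hypothetical sketch: client entry point and method name are assumptions.
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Run OCR over a public PDF; pages=[0] limits processing to the first page.
response = client.ocr.process(
    model="mistral-ocr-latest",
    document={
        "type": "document_url",
        "document_url": "https://example.com/sample.pdf",  # placeholder URL
    },
    pages=[0],
    include_image_base64=False,
    table_format="markdown",  # per the TableFormat literal above
)

for page in response.pages:
    print(page.index, page.markdown[:80])
```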
diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py
new file mode 100644
index 00000000..2813a1ca
--- /dev/null
+++ b/src/mistralai/client/models/ocrresponse.py
@@ -0,0 +1,68 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict
+from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import List
+from typing_extensions import NotRequired, TypedDict
+
+
+class OCRResponseTypedDict(TypedDict):
+    pages: List[OCRPageObjectTypedDict]
+    r"""List of OCR info for pages."""
+    model: str
+    r"""The model used to generate the OCR."""
+    usage_info: OCRUsageInfoTypedDict
+    document_annotation: NotRequired[Nullable[str]]
+    r"""Formatted response in the request_format if provided in json str"""
+
+
+class OCRResponse(BaseModel):
+    pages: List[OCRPageObject]
+    r"""List of OCR info for pages."""
+
+    model: str
+    r"""The model used to generate the OCR."""
+
+    usage_info: OCRUsageInfo
+
+    document_annotation: OptionalNullable[str] = UNSET
+    r"""Formatted response in the request_format if provided in json str"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["document_annotation"]
+        nullable_fields = ["document_annotation"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py
new file mode 100644
index 00000000..0c9091de
--- /dev/null
+++ b/src/mistralai/client/models/ocrtableobject.py
@@ -0,0 +1,34 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+import pydantic
+from typing import Literal
+from typing_extensions import Annotated, TypedDict
+
+
+Format = Literal[
+    "markdown",
+    "html",
+]
+r"""Format of the table"""
+
+
+class OCRTableObjectTypedDict(TypedDict):
+    id: str
+    r"""Table ID for extracted table in a page"""
+    content: str
+    r"""Content of the table in the given format"""
+    format_: Format
+    r"""Format of the table"""
+
+
+class OCRTableObject(BaseModel):
+    id: str
+    r"""Table ID for extracted table in a page"""
+
+    content: str
+    r"""Content of the table in the given format"""
+
+    format_: Annotated[Format, pydantic.Field(alias="format")]
+    r"""Format of the table"""
diff --git a/src/mistralai/client/models/ocrusageinfo.py b/src/mistralai/client/models/ocrusageinfo.py
new file mode 100644
index 00000000..62f07fd4
--- /dev/null
+++ b/src/mistralai/client/models/ocrusageinfo.py
@@ -0,0 +1,57 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class OCRUsageInfoTypedDict(TypedDict):
+    pages_processed: int
+    r"""Number of pages processed"""
+    doc_size_bytes: NotRequired[Nullable[int]]
+    r"""Document size in bytes"""
+
+
+class OCRUsageInfo(BaseModel):
+    pages_processed: int
+    r"""Number of pages processed"""
+
+    doc_size_bytes: OptionalNullable[int] = UNSET
+    r"""Document size in bytes"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["doc_size_bytes"]
+        nullable_fields = ["doc_size_bytes"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py
new file mode 100644
index 00000000..ad0c087e
--- /dev/null
+++ b/src/mistralai/client/models/outputcontentchunks.py
@@ -0,0 +1,37 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict
+from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict
+from .textchunk import TextChunk, TextChunkTypedDict
+from .thinkchunk import ThinkChunk, ThinkChunkTypedDict
+from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict
+from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict
+from typing import Union
+from typing_extensions import TypeAliasType
+
+
+OutputContentChunksTypedDict = TypeAliasType(
+    "OutputContentChunksTypedDict",
+    Union[
+        TextChunkTypedDict,
+        ImageURLChunkTypedDict,
+        DocumentURLChunkTypedDict,
+        ThinkChunkTypedDict,
+        ToolFileChunkTypedDict,
+        ToolReferenceChunkTypedDict,
+    ],
+)
+
+
+OutputContentChunks = TypeAliasType(
+    "OutputContentChunks",
+    Union[
+        TextChunk,
+        ImageURLChunk,
+        DocumentURLChunk,
+        ThinkChunk,
+        ToolFileChunk,
+        ToolReferenceChunk,
+    ],
+)
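`OutputContentChunks` is the union of every chunk type an output entry can carry. A sketch of dispatching on it with `isinstance`; the assumption that these classes are re-exported from `mistralai.client.models` follows the generated imports above but is not confirmed by this patch:

```python
from mistralai.client.models import TextChunk, ToolFileChunk, ToolReferenceChunk

def render_chunk(chunk) -> str:
    # Each branch narrows the OutputContentChunks union defined above.
    if isinstance(chunk, TextChunk):
        return chunk.text
    if isinstance(chunk, ToolReferenceChunk):
        return f"[{chunk.title}]({chunk.url})"
    if isinstance(chunk, ToolFileChunk):
        return f"<file {chunk.file_id}>"
    return ""  # this sketch ignores think/document/image chunks
```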
diff --git a/src/mistralai/client/models/paginationinfo.py b/src/mistralai/client/models/paginationinfo.py
new file mode 100644
index 00000000..0252f448
--- /dev/null
+++ b/src/mistralai/client/models/paginationinfo.py
@@ -0,0 +1,25 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class PaginationInfoTypedDict(TypedDict):
+    total_items: int
+    total_pages: int
+    current_page: int
+    page_size: int
+    has_more: bool
+
+
+class PaginationInfo(BaseModel):
+    total_items: int
+
+    total_pages: int
+
+    current_page: int
+
+    page_size: int
+
+    has_more: bool
diff --git a/src/mistralai/client/models/prediction.py b/src/mistralai/client/models/prediction.py
new file mode 100644
index 00000000..f2c5d9c6
--- /dev/null
+++ b/src/mistralai/client/models/prediction.py
@@ -0,0 +1,29 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class PredictionTypedDict(TypedDict):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
+    type: Literal["content"]
+    content: NotRequired[str]
+
+
+class Prediction(BaseModel):
+    r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content."""
+
+    TYPE: Annotated[
+        Annotated[
+            Optional[Literal["content"]], AfterValidator(validate_const("content"))
+        ],
+        pydantic.Field(alias="type"),
+    ] = "content"
+
+    content: Optional[str] = ""
diff --git a/src/mistralai/client/models/processingstatusout.py b/src/mistralai/client/models/processingstatusout.py
new file mode 100644
index 00000000..031f386f
--- /dev/null
+++ b/src/mistralai/client/models/processingstatusout.py
@@ -0,0 +1,16 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class ProcessingStatusOutTypedDict(TypedDict):
+    document_id: str
+    processing_status: str
+
+
+class ProcessingStatusOut(BaseModel):
+    document_id: str
+
+    processing_status: str
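The `Prediction` model above feeds the predicted-outputs optimization: when most of a completion is already known (rewriting a file, for example), passing it along lets the server skip ahead. A sketch reusing the `client` from the OCR example; the exact keyword wiring on `chat.complete` is an assumption:

```python
# Sketch only: parameter plumbing is assumed, not confirmed by this patch.
existing_code = "def add(a, b):\n    return a + b\n"

response = client.chat.complete(
    model="mistral-small-latest",
    messages=[
        {
            "role": "user",
            "content": "Rename the function add to sum_values:\n" + existing_code,
        },
    ],
    # Most of the output will match existing_code, so offer it as a prediction.
    prediction={"type": "content", "content": existing_code},
)
```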
diff --git a/src/mistralai/client/models/realtimetranscriptionerror.py b/src/mistralai/client/models/realtimetranscriptionerror.py
new file mode 100644
index 00000000..e6a889de
--- /dev/null
+++ b/src/mistralai/client/models/realtimetranscriptionerror.py
@@ -0,0 +1,27 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .realtimetranscriptionerrordetail import (
+    RealtimeTranscriptionErrorDetail,
+    RealtimeTranscriptionErrorDetailTypedDict,
+)
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class RealtimeTranscriptionErrorTypedDict(TypedDict):
+    error: RealtimeTranscriptionErrorDetailTypedDict
+    type: Literal["error"]
+
+
+class RealtimeTranscriptionError(BaseModel):
+    error: RealtimeTranscriptionErrorDetail
+
+    TYPE: Annotated[
+        Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))],
+        pydantic.Field(alias="type"),
+    ] = "error"
diff --git a/src/mistralai/client/models/realtimetranscriptionerrordetail.py b/src/mistralai/client/models/realtimetranscriptionerrordetail.py
new file mode 100644
index 00000000..27bb8d87
--- /dev/null
+++ b/src/mistralai/client/models/realtimetranscriptionerrordetail.py
@@ -0,0 +1,29 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import Any, Dict, Union
+from typing_extensions import TypeAliasType, TypedDict
+
+
+MessageTypedDict = TypeAliasType("MessageTypedDict", Union[str, Dict[str, Any]])
+r"""Human-readable error message."""
+
+
+Message = TypeAliasType("Message", Union[str, Dict[str, Any]])
+r"""Human-readable error message."""
+
+
+class RealtimeTranscriptionErrorDetailTypedDict(TypedDict):
+    message: MessageTypedDict
+    r"""Human-readable error message."""
+    code: int
+    r"""Internal error code for debugging."""
+
+
+class RealtimeTranscriptionErrorDetail(BaseModel):
+    message: Message
+    r"""Human-readable error message."""
+
+    code: int
+    r"""Internal error code for debugging."""
diff --git a/src/mistralai/client/models/realtimetranscriptionsession.py b/src/mistralai/client/models/realtimetranscriptionsession.py
new file mode 100644
index 00000000..3a330651
--- /dev/null
+++ b/src/mistralai/client/models/realtimetranscriptionsession.py
@@ -0,0 +1,20 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .audioformat import AudioFormat, AudioFormatTypedDict
+from mistralai.client.types import BaseModel
+from typing_extensions import TypedDict
+
+
+class RealtimeTranscriptionSessionTypedDict(TypedDict):
+    request_id: str
+    model: str
+    audio_format: AudioFormatTypedDict
+
+
+class RealtimeTranscriptionSession(BaseModel):
+    request_id: str
+
+    model: str
+
+    audio_format: AudioFormat
diff --git a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py
new file mode 100644
index 00000000..cc6d5028
--- /dev/null
+++ b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py
@@ -0,0 +1,30 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .realtimetranscriptionsession import (
+    RealtimeTranscriptionSession,
+    RealtimeTranscriptionSessionTypedDict,
+)
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict):
+    session: RealtimeTranscriptionSessionTypedDict
+    type: Literal["session.created"]
+
+
+class RealtimeTranscriptionSessionCreated(BaseModel):
+    session: RealtimeTranscriptionSession
+
+    TYPE: Annotated[
+        Annotated[
+            Optional[Literal["session.created"]],
+            AfterValidator(validate_const("session.created")),
+        ],
+        pydantic.Field(alias="type"),
+    ] = "session.created"
diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py
new file mode 100644
index 00000000..3da23595
--- /dev/null
+++ b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py
@@ -0,0 +1,30 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .realtimetranscriptionsession import (
+    RealtimeTranscriptionSession,
+    RealtimeTranscriptionSessionTypedDict,
+)
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import validate_const
+import pydantic
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional
+from typing_extensions import Annotated, TypedDict
+
+
+class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict):
+    session: RealtimeTranscriptionSessionTypedDict
+    type: Literal["session.updated"]
+
+
+class RealtimeTranscriptionSessionUpdated(BaseModel):
+    session: RealtimeTranscriptionSession
+
+    TYPE: Annotated[
+        Annotated[
+            Optional[Literal["session.updated"]],
+            AfterValidator(validate_const("session.updated")),
+        ],
+        pydantic.Field(alias="type"),
+    ] = "session.updated"
diff --git a/src/mistralai/client/models/referencechunk.py b/src/mistralai/client/models/referencechunk.py
new file mode 100644
index 00000000..4c703b81
--- /dev/null
+++ b/src/mistralai/client/models/referencechunk.py
@@ -0,0 +1,20 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import List, Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+ReferenceChunkType = Literal["reference",]
+
+
+class ReferenceChunkTypedDict(TypedDict):
+    reference_ids: List[int]
+    type: NotRequired[ReferenceChunkType]
+
+
+class ReferenceChunk(BaseModel):
+    reference_ids: List[int]
+
+    type: Optional[ReferenceChunkType] = "reference"
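The realtime transcription events above (`session.created`, `session.updated`, `error`) are discriminated by constant `type` fields, so a handler can dispatch on the model class. A sketch with the event source left abstract, since this patch only shows the models themselves:

```python
from mistralai.client.models import (
    RealtimeTranscriptionError,
    RealtimeTranscriptionSessionCreated,
    RealtimeTranscriptionSessionUpdated,
)

def handle_event(event) -> None:
    # `event` is assumed to be one of the realtime models defined above.
    if isinstance(event, RealtimeTranscriptionSessionCreated):
        print("session started:", event.session.request_id)
    elif isinstance(event, RealtimeTranscriptionSessionUpdated):
        print("session updated, model:", event.session.model)
    elif isinstance(event, RealtimeTranscriptionError):
        print(f"error {event.error.code}: {event.error.message}")
```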
diff --git a/src/mistralai/client/models/requestsource.py b/src/mistralai/client/models/requestsource.py
new file mode 100644
index 00000000..7b0a35c4
--- /dev/null
+++ b/src/mistralai/client/models/requestsource.py
@@ -0,0 +1,11 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from typing import Literal
+
+
+RequestSource = Literal[
+    "api",
+    "playground",
+    "agent_builder_v1",
+]
diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py
new file mode 100644
index 00000000..54056256
--- /dev/null
+++ b/src/mistralai/client/models/responsedoneevent.py
@@ -0,0 +1,25 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing import Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+ResponseDoneEventType = Literal["conversation.response.done",]
+
+
+class ResponseDoneEventTypedDict(TypedDict):
+    usage: ConversationUsageInfoTypedDict
+    type: NotRequired[ResponseDoneEventType]
+    created_at: NotRequired[datetime]
+
+
+class ResponseDoneEvent(BaseModel):
+    usage: ConversationUsageInfo
+
+    type: Optional[ResponseDoneEventType] = "conversation.response.done"
+
+    created_at: Optional[datetime] = None
diff --git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py
new file mode 100644
index 00000000..c9ef95a0
--- /dev/null
+++ b/src/mistralai/client/models/responseerrorevent.py
@@ -0,0 +1,27 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing import Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+ResponseErrorEventType = Literal["conversation.response.error",]
+
+
+class ResponseErrorEventTypedDict(TypedDict):
+    message: str
+    code: int
+    type: NotRequired[ResponseErrorEventType]
+    created_at: NotRequired[datetime]
+
+
+class ResponseErrorEvent(BaseModel):
+    message: str
+
+    code: int
+
+    type: Optional[ResponseErrorEventType] = "conversation.response.error"
+
+    created_at: Optional[datetime] = None
diff --git a/src/mistralai/client/models/responseformat.py b/src/mistralai/client/models/responseformat.py
new file mode 100644
index 00000000..5899b017
--- /dev/null
+++ b/src/mistralai/client/models/responseformat.py
@@ -0,0 +1,60 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .jsonschema import JSONSchema, JSONSchemaTypedDict
+from .responseformats import ResponseFormats
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ResponseFormatTypedDict(TypedDict):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
+    type: NotRequired[ResponseFormats]
+    json_schema: NotRequired[Nullable[JSONSchemaTypedDict]]
+
+
+class ResponseFormat(BaseModel):
+    r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide."""
+
+    type: Optional[ResponseFormats] = None
+
+    json_schema: OptionalNullable[JSONSchema] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["type", "json_schema"]
+        nullable_fields = ["json_schema"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/responseformats.py b/src/mistralai/client/models/responseformats.py
new file mode 100644
index 00000000..cbf83ce7
--- /dev/null
+++ b/src/mistralai/client/models/responseformats.py
@@ -0,0 +1,11 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from typing import Literal
+
+
+ResponseFormats = Literal[
+    "text",
+    "json_object",
+    "json_schema",
+]
diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py
new file mode 100644
index 00000000..dc6a10f9
--- /dev/null
+++ b/src/mistralai/client/models/responsestartedevent.py
@@ -0,0 +1,24 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing import Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+ResponseStartedEventType = Literal["conversation.response.started",]
+
+
+class ResponseStartedEventTypedDict(TypedDict):
+    conversation_id: str
+    type: NotRequired[ResponseStartedEventType]
+    created_at: NotRequired[datetime]
+
+
+class ResponseStartedEvent(BaseModel):
+    conversation_id: str
+
+    type: Optional[ResponseStartedEventType] = "conversation.response.started"
+
+    created_at: Optional[datetime] = None
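The `ResponseFormat` docstring above spells out the contract: `json_object` requires you to ask for JSON in the prompt, while `json_schema` additionally enforces a schema. A sketch of the schema mode; the inner `json_schema` keys come from the `JSONSchema` model, which is outside this hunk, so treat them as assumptions:

```python
response = client.chat.complete(
    model="mistral-small-latest",
    messages=[
        {"role": "user", "content": "Extract city and country from: 'Paris, France'"},
    ],
    response_format={
        "type": "json_schema",
        "json_schema": {  # field names assumed from the JSONSchema model
            "name": "location",
            "schema": {
                "type": "object",
                "properties": {
                    "city": {"type": "string"},
                    "country": {"type": "string"},
                },
                "required": ["city", "country"],
            },
        },
    },
)
```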
diff --git a/src/mistralai/client/models/responsevalidationerror.py b/src/mistralai/client/models/responsevalidationerror.py
new file mode 100644
index 00000000..bab5d0b7
--- /dev/null
+++ b/src/mistralai/client/models/responsevalidationerror.py
@@ -0,0 +1,27 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass
+
+from mistralai.client.models import MistralError
+
+
+@dataclass(unsafe_hash=True)
+class ResponseValidationError(MistralError):
+    """Error raised when there is a type mismatch between the response data and the expected Pydantic model."""
+
+    def __init__(
+        self,
+        message: str,
+        raw_response: httpx.Response,
+        cause: Exception,
+        body: Optional[str] = None,
+    ):
+        message = f"{message}: {cause}"
+        super().__init__(message, raw_response, body)
+
+    @property
+    def cause(self):
+        """Normally the Pydantic ValidationError"""
+        return self.__cause__
diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py
new file mode 100644
index 00000000..7fdcd37d
--- /dev/null
+++ b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py
@@ -0,0 +1,38 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .basemodelcard import BaseModelCard, BaseModelCardTypedDict
+from .ftmodelcard import FTModelCard, FTModelCardTypedDict
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator
+from pydantic import Discriminator, Tag
+from typing import Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
+
+
+class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict):
+    model_id: str
+    r"""The ID of the model to retrieve."""
+
+
+class RetrieveModelV1ModelsModelIDGetRequest(BaseModel):
+    model_id: Annotated[
+        str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
+    ]
+    r"""The ID of the model to retrieve."""
+
+
+RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType(
+    "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict",
+    Union[BaseModelCardTypedDict, FTModelCardTypedDict],
+)
+r"""Successful Response"""
+
+
+RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[
+    Union[
+        Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")]
+    ],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
+r"""Successful Response"""
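The retrieve-model response above is a discriminated union: either a `BaseModelCard` (tag `base`) or an `FTModelCard` (tag `fine-tuned`), routed on the `type` field. A sketch of telling them apart, assuming the operation is exposed as `models.retrieve` and that the cards carry the fields named below:

```python
model_card = client.models.retrieve(model_id="mistral-small-latest")

# The Discriminator defined above routes on the `type` field.
if model_card.type == "fine-tuned":
    print("fine-tuned model:", model_card.id)  # FTModelCard branch
else:
    print("base model:", model_card.id)  # BaseModelCard branch
```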
diff --git a/src/mistralai/client/models/retrievefileout.py b/src/mistralai/client/models/retrievefileout.py
new file mode 100644
index 00000000..ffd0617a
--- /dev/null
+++ b/src/mistralai/client/models/retrievefileout.py
@@ -0,0 +1,97 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .filepurpose import FilePurpose
+from .sampletype import SampleType
+from .source import Source
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+import pydantic
+from pydantic import model_serializer
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class RetrieveFileOutTypedDict(TypedDict):
+    id: str
+    r"""The unique identifier of the file."""
+    object: str
+    r"""The object type, which is always \"file\"."""
+    size_bytes: int
+    r"""The size of the file, in bytes."""
+    created_at: int
+    r"""The UNIX timestamp (in seconds) of the event."""
+    filename: str
+    r"""The name of the uploaded file."""
+    purpose: FilePurpose
+    sample_type: SampleType
+    source: Source
+    deleted: bool
+    num_lines: NotRequired[Nullable[int]]
+    mimetype: NotRequired[Nullable[str]]
+    signature: NotRequired[Nullable[str]]
+
+
+class RetrieveFileOut(BaseModel):
+    id: str
+    r"""The unique identifier of the file."""
+
+    object: str
+    r"""The object type, which is always \"file\"."""
+
+    size_bytes: Annotated[int, pydantic.Field(alias="bytes")]
+    r"""The size of the file, in bytes."""
+
+    created_at: int
+    r"""The UNIX timestamp (in seconds) of the event."""
+
+    filename: str
+    r"""The name of the uploaded file."""
+
+    purpose: FilePurpose
+
+    sample_type: SampleType
+
+    source: Source
+
+    deleted: bool
+
+    num_lines: OptionalNullable[int] = UNSET
+
+    mimetype: OptionalNullable[str] = UNSET
+
+    signature: OptionalNullable[str] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["num_lines", "mimetype", "signature"]
+        nullable_fields = ["num_lines", "mimetype", "signature"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/sampletype.py b/src/mistralai/client/models/sampletype.py
new file mode 100644
index 00000000..e0727b02
--- /dev/null
+++ b/src/mistralai/client/models/sampletype.py
@@ -0,0 +1,17 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import UnrecognizedStr
+from typing import Literal, Union
+
+
+SampleType = Union[
+    Literal[
+        "pretrain",
+        "instruct",
+        "batch_request",
+        "batch_result",
+        "batch_error",
+    ],
+    UnrecognizedStr,
+]
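`RetrieveFileOut` maps the wire field `bytes` onto the Python-side name `size_bytes` through a pydantic alias. A sketch of reading it back, assuming the operation is exposed as `files.retrieve`:

```python
# Placeholder file id; substitute a real one from files.list() or an upload.
file_info = client.files.retrieve(file_id="00000000-0000-0000-0000-000000000000")

# `size_bytes` is the aliased `bytes` field from the wire format.
print(file_info.filename, file_info.size_bytes, file_info.purpose)
```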
diff --git a/src/mistralai/client/models/sdkerror.py b/src/mistralai/client/models/sdkerror.py
new file mode 100644
index 00000000..ceb03c48
--- /dev/null
+++ b/src/mistralai/client/models/sdkerror.py
@@ -0,0 +1,40 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+import httpx
+from typing import Optional
+from dataclasses import dataclass
+
+from mistralai.client.models import MistralError
+
+MAX_MESSAGE_LEN = 10_000
+
+
+@dataclass(unsafe_hash=True)
+class SDKError(MistralError):
+    """The fallback error class if no more specific error class is matched."""
+
+    def __init__(
+        self, message: str, raw_response: httpx.Response, body: Optional[str] = None
+    ):
+        body_display = body or raw_response.text or '""'
+
+        if message:
+            message += ": "
+        message += f"Status {raw_response.status_code}"
+
+        headers = raw_response.headers
+        content_type = headers.get("content-type", '""')
+        if content_type != "application/json":
+            if " " in content_type:
+                content_type = f'"{content_type}"'
+            message += f" Content-Type {content_type}"
+
+        if len(body_display) > MAX_MESSAGE_LEN:
+            truncated = body_display[:MAX_MESSAGE_LEN]
+            remaining = len(body_display) - MAX_MESSAGE_LEN
+            body_display = f"{truncated}...and {remaining} more chars"
+
+        message += f". Body: {body_display}"
+        message = message.strip()
+
+        super().__init__(message, raw_response, body)
diff --git a/src/mistralai/client/models/security.py b/src/mistralai/client/models/security.py
new file mode 100644
index 00000000..1b67229b
--- /dev/null
+++ b/src/mistralai/client/models/security.py
@@ -0,0 +1,25 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from mistralai.client.utils import FieldMetadata, SecurityMetadata
+from typing import Optional
+from typing_extensions import Annotated, NotRequired, TypedDict
+
+
+class SecurityTypedDict(TypedDict):
+    api_key: NotRequired[str]
+
+
+class Security(BaseModel):
+    api_key: Annotated[
+        Optional[str],
+        FieldMetadata(
+            security=SecurityMetadata(
+                scheme=True,
+                scheme_type="http",
+                sub_type="bearer",
+                field_name="Authorization",
+            )
+        ),
+    ] = None
diff --git a/src/mistralai/client/models/shareenum.py b/src/mistralai/client/models/shareenum.py
new file mode 100644
index 00000000..ca1b9624
--- /dev/null
+++ b/src/mistralai/client/models/shareenum.py
@@ -0,0 +1,14 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import UnrecognizedStr
+from typing import Literal, Union
+
+
+ShareEnum = Union[
+    Literal[
+        "Viewer",
+        "Editor",
+    ],
+    UnrecognizedStr,
+]
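`SDKError` above is the fallback error: it folds the status code, content type, and a body capped at `MAX_MESSAGE_LEN` characters into one message. A sketch of catching it, assuming `SDKError` is re-exported from `mistralai.client.models` the same way `MistralError` is imported above:

```python
from mistralai.client.models import SDKError

try:
    client.models.retrieve(model_id="no-such-model")
except SDKError as err:
    # raw_response and body are kept on the error for inspection.
    print("status:", err.raw_response.status_code)
    print("message:", str(err))
```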
diff --git a/src/mistralai/client/models/sharingdelete.py b/src/mistralai/client/models/sharingdelete.py
new file mode 100644
index 00000000..d659342f
--- /dev/null
+++ b/src/mistralai/client/models/sharingdelete.py
@@ -0,0 +1,61 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .entitytype import EntityType
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class SharingDeleteTypedDict(TypedDict):
+    share_with_uuid: str
+    r"""The id of the entity (user, workspace or organization) to share with"""
+    share_with_type: EntityType
+    r"""The type of entity, used to share a library."""
+    org_id: NotRequired[Nullable[str]]
+
+
+class SharingDelete(BaseModel):
+    share_with_uuid: str
+    r"""The id of the entity (user, workspace or organization) to share with"""
+
+    share_with_type: EntityType
+    r"""The type of entity, used to share a library."""
+
+    org_id: OptionalNullable[str] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["org_id"]
+        nullable_fields = ["org_id"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/sharingin.py b/src/mistralai/client/models/sharingin.py
new file mode 100644
index 00000000..630f4c70
--- /dev/null
+++ b/src/mistralai/client/models/sharingin.py
@@ -0,0 +1,65 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .entitytype import EntityType
+from .shareenum import ShareEnum
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class SharingInTypedDict(TypedDict):
+    level: ShareEnum
+    share_with_uuid: str
+    r"""The id of the entity (user, workspace or organization) to share with"""
+    share_with_type: EntityType
+    r"""The type of entity, used to share a library."""
+    org_id: NotRequired[Nullable[str]]
+
+
+class SharingIn(BaseModel):
+    level: ShareEnum
+
+    share_with_uuid: str
+    r"""The id of the entity (user, workspace or organization) to share with"""
+
+    share_with_type: EntityType
+    r"""The type of entity, used to share a library."""
+
+    org_id: OptionalNullable[str] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["org_id"]
+        nullable_fields = ["org_id"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharingout.py
new file mode 100644
index 00000000..195701d1
--- /dev/null
+++ b/src/mistralai/client/models/sharingout.py
@@ -0,0 +1,65 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing_extensions import NotRequired, TypedDict
+
+
+class SharingOutTypedDict(TypedDict):
+    library_id: str
+    org_id: str
+    role: str
+    share_with_type: str
+    share_with_uuid: Nullable[str]
+    user_id: NotRequired[Nullable[str]]
+
+
+class SharingOut(BaseModel):
+    library_id: str
+
+    org_id: str
+
+    role: str
+
+    share_with_type: str
+
+    share_with_uuid: Nullable[str]
+
+    user_id: OptionalNullable[str] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["user_id"]
+        nullable_fields = ["user_id", "share_with_uuid"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/source.py b/src/mistralai/client/models/source.py
new file mode 100644
index 00000000..181b327e
--- /dev/null
+++ b/src/mistralai/client/models/source.py
@@ -0,0 +1,15 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import UnrecognizedStr
+from typing import Literal, Union
+
+
+Source = Union[
+    Literal[
+        "upload",
+        "repository",
+        "mistral",
+    ],
+    UnrecognizedStr,
+]
diff --git a/src/mistralai/client/models/ssetypes.py b/src/mistralai/client/models/ssetypes.py
new file mode 100644
index 00000000..796f0327
--- /dev/null
+++ b/src/mistralai/client/models/ssetypes.py
@@ -0,0 +1,19 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from typing import Literal
+
+
+SSETypes = Literal[
+    "conversation.response.started",
+    "conversation.response.done",
+    "conversation.response.error",
+    "message.output.delta",
+    "tool.execution.started",
+    "tool.execution.delta",
+    "tool.execution.done",
+    "agent.handoff.started",
+    "agent.handoff.done",
+    "function.call.delta",
+]
+r"""Server side events sent when streaming a conversation response."""
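`SSETypes` enumerates every event a streamed conversation response can emit. A sketch of consuming a stream and keying on the event type; the `start_stream` call and the shape of each yielded event (`event.event` carrying the SSE type, `event.data` carrying the payload) are assumptions about the surrounding SDK, not shown in this hunk:

```python
with client.beta.conversations.start_stream(
    agent_id="ag_0123456789",  # placeholder agent id
    inputs="Hello!",
) as stream:
    for event in stream:
        # event.event is assumed to be one of the SSETypes literals above.
        if event.event == "message.output.delta":
            print(event.data.content, end="")
        elif event.event == "conversation.response.error":
            print("error:", event.data.message)
```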
diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py
new file mode 100644
index 00000000..9e01bc57
--- /dev/null
+++ b/src/mistralai/client/models/systemmessage.py
@@ -0,0 +1,35 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .systemmessagecontentchunks import (
+    SystemMessageContentChunks,
+    SystemMessageContentChunksTypedDict,
+)
+from mistralai.client.types import BaseModel
+from typing import List, Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+SystemMessageContentTypedDict = TypeAliasType(
+    "SystemMessageContentTypedDict",
+    Union[str, List[SystemMessageContentChunksTypedDict]],
+)
+
+
+SystemMessageContent = TypeAliasType(
+    "SystemMessageContent", Union[str, List[SystemMessageContentChunks]]
+)
+
+
+Role = Literal["system",]
+
+
+class SystemMessageTypedDict(TypedDict):
+    content: SystemMessageContentTypedDict
+    role: NotRequired[Role]
+
+
+class SystemMessage(BaseModel):
+    content: SystemMessageContent
+
+    role: Optional[Role] = "system"
diff --git a/src/mistralai/client/models/systemmessagecontentchunks.py b/src/mistralai/client/models/systemmessagecontentchunks.py
new file mode 100644
index 00000000..7a797379
--- /dev/null
+++ b/src/mistralai/client/models/systemmessagecontentchunks.py
@@ -0,0 +1,21 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .textchunk import TextChunk, TextChunkTypedDict
+from .thinkchunk import ThinkChunk, ThinkChunkTypedDict
+from mistralai.client.utils import get_discriminator
+from pydantic import Discriminator, Tag
+from typing import Union
+from typing_extensions import Annotated, TypeAliasType
+
+
+SystemMessageContentChunksTypedDict = TypeAliasType(
+    "SystemMessageContentChunksTypedDict",
+    Union[TextChunkTypedDict, ThinkChunkTypedDict],
+)
+
+
+SystemMessageContentChunks = Annotated[
+    Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]],
+    Discriminator(lambda m: get_discriminator(m, "type", "type")),
+]
diff --git a/src/mistralai/client/models/textchunk.py b/src/mistralai/client/models/textchunk.py
new file mode 100644
index 00000000..4207ce7e
--- /dev/null
+++ b/src/mistralai/client/models/textchunk.py
@@ -0,0 +1,20 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel
+from typing import Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+TextChunkType = Literal["text",]
+
+
+class TextChunkTypedDict(TypedDict):
+    text: str
+    type: NotRequired[TextChunkType]
+
+
+class TextChunk(BaseModel):
+    text: str
+
+    type: Optional[TextChunkType] = "text"
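`SystemMessageContent` accepts either a plain string or a list of text/think chunks, discriminated on `type`. A sketch of both spellings in dict form, using only the keys the TypedDicts above define:

```python
# Plain-string form.
system_short = {"role": "system", "content": "You are a terse assistant."}

# Chunked form: a list of TextChunk-shaped dicts, per SystemMessageContentChunks.
system_chunked = {
    "role": "system",
    "content": [
        {"type": "text", "text": "You are a terse assistant."},
        {"type": "text", "text": "Answer in one sentence."},
    ],
}
```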
diff --git a/src/mistralai/client/models/thinkchunk.py b/src/mistralai/client/models/thinkchunk.py
new file mode 100644
index 00000000..b1560806
--- /dev/null
+++ b/src/mistralai/client/models/thinkchunk.py
@@ -0,0 +1,35 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
+from .textchunk import TextChunk, TextChunkTypedDict
+from mistralai.client.types import BaseModel
+from typing import List, Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ThinkingTypedDict = TypeAliasType(
+    "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict]
+)
+
+
+Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk])
+
+
+ThinkChunkType = Literal["thinking",]
+
+
+class ThinkChunkTypedDict(TypedDict):
+    thinking: List[ThinkingTypedDict]
+    closed: NotRequired[bool]
+    r"""Whether the thinking chunk is closed or not. Currently only used for prefixing."""
+    type: NotRequired[ThinkChunkType]
+
+
+class ThinkChunk(BaseModel):
+    thinking: List[Thinking]
+
+    closed: Optional[bool] = None
+    r"""Whether the thinking chunk is closed or not. Currently only used for prefixing."""
+
+    type: Optional[ThinkChunkType] = "thinking"
diff --git a/src/mistralai/client/models/timestampgranularity.py b/src/mistralai/client/models/timestampgranularity.py
new file mode 100644
index 00000000..5bda890f
--- /dev/null
+++ b/src/mistralai/client/models/timestampgranularity.py
@@ -0,0 +1,10 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from typing import Literal
+
+
+TimestampGranularity = Literal[
+    "segment",
+    "word",
+]
diff --git a/src/mistralai/client/models/tool.py b/src/mistralai/client/models/tool.py
new file mode 100644
index 00000000..4b29f575
--- /dev/null
+++ b/src/mistralai/client/models/tool.py
@@ -0,0 +1,19 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .function import Function, FunctionTypedDict
+from .tooltypes import ToolTypes
+from mistralai.client.types import BaseModel
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ToolTypedDict(TypedDict):
+    function: FunctionTypedDict
+    type: NotRequired[ToolTypes]
+
+
+class Tool(BaseModel):
+    function: Function
+
+    type: Optional[ToolTypes] = None
diff --git a/src/mistralai/client/models/toolcall.py b/src/mistralai/client/models/toolcall.py
new file mode 100644
index 00000000..558b49bf
--- /dev/null
+++ b/src/mistralai/client/models/toolcall.py
@@ -0,0 +1,25 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .functioncall import FunctionCall, FunctionCallTypedDict
+from .tooltypes import ToolTypes
+from mistralai.client.types import BaseModel
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ToolCallTypedDict(TypedDict):
+    function: FunctionCallTypedDict
+    id: NotRequired[str]
+    type: NotRequired[ToolTypes]
+    index: NotRequired[int]
+
+
+class ToolCall(BaseModel):
+    function: FunctionCall
+
+    id: Optional[str] = "null"
+
+    type: Optional[ToolTypes] = None
+
+    index: Optional[int] = 0
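`Tool` and `ToolCall` model the two halves of the function-calling loop: you declare a tool, and the model answers with a call. A sketch of declaring one and reading the call back, assuming the chat surface matches the current SDK:

```python
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

response = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Weather in Paris?"}],
    tools=tools,
)

# The assistant turn carries ToolCall objects when it decides to call a tool.
tool_call = response.choices[0].message.tool_calls[0]
print(tool_call.function.name, tool_call.function.arguments)
```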
diff --git a/src/mistralai/client/models/toolchoice.py b/src/mistralai/client/models/toolchoice.py
new file mode 100644
index 00000000..2c7f6cbf
--- /dev/null
+++ b/src/mistralai/client/models/toolchoice.py
@@ -0,0 +1,25 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .functionname import FunctionName, FunctionNameTypedDict
+from .tooltypes import ToolTypes
+from mistralai.client.types import BaseModel
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ToolChoiceTypedDict(TypedDict):
+    r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice"""
+
+    function: FunctionNameTypedDict
+    r"""this restriction of `Function` is used to select a specific function to call"""
+    type: NotRequired[ToolTypes]
+
+
+class ToolChoice(BaseModel):
+    r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice"""
+
+    function: FunctionName
+    r"""this restriction of `Function` is used to select a specific function to call"""
+
+    type: Optional[ToolTypes] = None
diff --git a/src/mistralai/client/models/toolchoiceenum.py b/src/mistralai/client/models/toolchoiceenum.py
new file mode 100644
index 00000000..01f6f677
--- /dev/null
+++ b/src/mistralai/client/models/toolchoiceenum.py
@@ -0,0 +1,12 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from typing import Literal
+
+
+ToolChoiceEnum = Literal[
+    "auto",
+    "none",
+    "any",
+    "required",
+]
diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py
new file mode 100644
index 00000000..0268e6a0
--- /dev/null
+++ b/src/mistralai/client/models/toolexecutiondeltaevent.py
@@ -0,0 +1,44 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .builtinconnectors import BuiltInConnectors
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing import Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ToolExecutionDeltaEventType = Literal["tool.execution.delta",]
+
+
+ToolExecutionDeltaEventNameTypedDict = TypeAliasType(
+    "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str]
+)
+
+
+ToolExecutionDeltaEventName = TypeAliasType(
+    "ToolExecutionDeltaEventName", Union[BuiltInConnectors, str]
+)
+
+
+class ToolExecutionDeltaEventTypedDict(TypedDict):
+    id: str
+    name: ToolExecutionDeltaEventNameTypedDict
+    arguments: str
+    type: NotRequired[ToolExecutionDeltaEventType]
+    created_at: NotRequired[datetime]
+    output_index: NotRequired[int]
+
+
+class ToolExecutionDeltaEvent(BaseModel):
+    id: str
+
+    name: ToolExecutionDeltaEventName
+
+    arguments: str
+
+    type: Optional[ToolExecutionDeltaEventType] = "tool.execution.delta"
+
+    created_at: Optional[datetime] = None
+
+    output_index: Optional[int] = 0
diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py
new file mode 100644
index 00000000..854baee9
--- /dev/null
+++ b/src/mistralai/client/models/toolexecutiondoneevent.py
@@ -0,0 +1,44 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .builtinconnectors import BuiltInConnectors
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing import Any, Dict, Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ToolExecutionDoneEventType = Literal["tool.execution.done",]
+
+
+ToolExecutionDoneEventNameTypedDict = TypeAliasType(
+    "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str]
+)
+
+
+ToolExecutionDoneEventName = TypeAliasType(
+    "ToolExecutionDoneEventName", Union[BuiltInConnectors, str]
+)
+
+
+class ToolExecutionDoneEventTypedDict(TypedDict):
+    id: str
+    name: ToolExecutionDoneEventNameTypedDict
+    type: NotRequired[ToolExecutionDoneEventType]
+    created_at: NotRequired[datetime]
+    output_index: NotRequired[int]
+    info: NotRequired[Dict[str, Any]]
+
+
+class ToolExecutionDoneEvent(BaseModel):
+    id: str
+
+    name: ToolExecutionDoneEventName
+
+    type: Optional[ToolExecutionDoneEventType] = "tool.execution.done"
+
+    created_at: Optional[datetime] = None
+
+    output_index: Optional[int] = 0
+
+    info: Optional[Dict[str, Any]] = None
diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py
new file mode 100644
index 00000000..839709fb
--- /dev/null
+++ b/src/mistralai/client/models/toolexecutionentry.py
@@ -0,0 +1,86 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .builtinconnectors import BuiltInConnectors
+from datetime import datetime
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Any, Dict, Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ToolExecutionEntryObject = Literal["entry",]
+
+
+ToolExecutionEntryType = Literal["tool.execution",]
+
+
+NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str])
+
+
+Name = TypeAliasType("Name", Union[BuiltInConnectors, str])
+
+
+class ToolExecutionEntryTypedDict(TypedDict):
+    name: NameTypedDict
+    arguments: str
+    object: NotRequired[ToolExecutionEntryObject]
+    type: NotRequired[ToolExecutionEntryType]
+    created_at: NotRequired[datetime]
+    completed_at: NotRequired[Nullable[datetime]]
+    id: NotRequired[str]
+    info: NotRequired[Dict[str, Any]]
+
+
+class ToolExecutionEntry(BaseModel):
+    name: Name
+
+    arguments: str
+
+    object: Optional[ToolExecutionEntryObject] = "entry"
+
+    type: Optional[ToolExecutionEntryType] = "tool.execution"
+
+    created_at: Optional[datetime] = None
+
+    completed_at: OptionalNullable[datetime] = UNSET
+
+    id: Optional[str] = None
+
+    info: Optional[Dict[str, Any]] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"]
+        nullable_fields = ["completed_at"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py
new file mode 100644
index 00000000..66438cfc
--- /dev/null
+++ b/src/mistralai/client/models/toolexecutionstartedevent.py
@@ -0,0 +1,44 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .builtinconnectors import BuiltInConnectors
+from datetime import datetime
+from mistralai.client.types import BaseModel
+from typing import Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ToolExecutionStartedEventType = Literal["tool.execution.started",]
+
+
+ToolExecutionStartedEventNameTypedDict = TypeAliasType(
+    "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str]
+)
+
+
+ToolExecutionStartedEventName = TypeAliasType(
+    "ToolExecutionStartedEventName", Union[BuiltInConnectors, str]
+)
+
+
+class ToolExecutionStartedEventTypedDict(TypedDict):
+    id: str
+    name: ToolExecutionStartedEventNameTypedDict
+    arguments: str
+    type: NotRequired[ToolExecutionStartedEventType]
+    created_at: NotRequired[datetime]
+    output_index: NotRequired[int]
+
+
+class ToolExecutionStartedEvent(BaseModel):
+    id: str
+
+    name: ToolExecutionStartedEventName
+
+    arguments: str
+
+    type: Optional[ToolExecutionStartedEventType] = "tool.execution.started"
+
+    created_at: Optional[datetime] = None
+
+    output_index: Optional[int] = 0
diff --git a/src/mistralai/client/models/toolfilechunk.py b/src/mistralai/client/models/toolfilechunk.py
new file mode 100644
index 00000000..62b5ffed
--- /dev/null
+++ b/src/mistralai/client/models/toolfilechunk.py
@@ -0,0 +1,75 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .builtinconnectors import BuiltInConnectors
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
+from pydantic import model_serializer
+from typing import Literal, Optional, Union
+from typing_extensions import NotRequired, TypeAliasType, TypedDict
+
+
+ToolFileChunkType = Literal["tool_file",]
+
+
+ToolFileChunkToolTypedDict = TypeAliasType(
+    "ToolFileChunkToolTypedDict", Union[BuiltInConnectors, str]
+)
+
+
+ToolFileChunkTool = TypeAliasType("ToolFileChunkTool", Union[BuiltInConnectors, str])
+
+
+class ToolFileChunkTypedDict(TypedDict):
+    tool: ToolFileChunkToolTypedDict
+    file_id: str
+    type: NotRequired[ToolFileChunkType]
+    file_name: NotRequired[Nullable[str]]
+    file_type: NotRequired[Nullable[str]]
+
+
+class ToolFileChunk(BaseModel):
+    tool: ToolFileChunkTool
+
+    file_id: str
+
+    type: Optional[ToolFileChunkType] = "tool_file"
+
+    file_name: OptionalNullable[str] = UNSET
+
+    file_type: OptionalNullable[str] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["type", "file_name", "file_type"]
+        nullable_fields = ["file_name", "file_type"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) + + +ToolMessageRole = Literal["tool",] + + +class ToolMessageTypedDict(TypedDict): + content: Nullable[ToolMessageContentTypedDict] + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + role: NotRequired[ToolMessageRole] + + +class ToolMessage(BaseModel): + content: Nullable[ToolMessageContent] + + tool_call_id: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + role: Optional[ToolMessageRole] = "tool" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["tool_call_id", "name", "role"] + nullable_fields = ["content", "tool_call_id", "name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolreferencechunk.py b/src/mistralai/client/models/toolreferencechunk.py new file mode 100644 index 00000000..882b1563 --- /dev/null +++ b/src/mistralai/client/models/toolreferencechunk.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ToolReferenceChunkType = Literal["tool_reference",] + + +ToolReferenceChunkToolTypedDict = TypeAliasType( + "ToolReferenceChunkToolTypedDict", Union[BuiltInConnectors, str] +) + + +ToolReferenceChunkTool = TypeAliasType( + "ToolReferenceChunkTool", Union[BuiltInConnectors, str] +) + + +class ToolReferenceChunkTypedDict(TypedDict): + tool: ToolReferenceChunkToolTypedDict + title: str + type: NotRequired[ToolReferenceChunkType] + url: NotRequired[Nullable[str]] + favicon: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class ToolReferenceChunk(BaseModel): + tool: ToolReferenceChunkTool + + title: str + + type: Optional[ToolReferenceChunkType] = "tool_reference" + + url: OptionalNullable[str] = UNSET + + favicon: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "url", "favicon", "description"] + nullable_fields = ["url", "favicon", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/tooltypes.py b/src/mistralai/client/models/tooltypes.py new file mode 100644 index 00000000..abb26c25 --- /dev/null +++ b/src/mistralai/client/models/tooltypes.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/src/mistralai/client/models/trainingfile.py b/src/mistralai/client/models/trainingfile.py new file mode 100644 index 00000000..1d9763e0 --- /dev/null +++ b/src/mistralai/client/models/trainingfile.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class TrainingFileTypedDict(TypedDict): + file_id: str + weight: NotRequired[float] + + +class TrainingFile(BaseModel): + file_id: str + + weight: Optional[float] = 1 diff --git a/src/mistralai/client/models/transcriptionresponse.py b/src/mistralai/client/models/transcriptionresponse.py new file mode 100644 index 00000000..24c0b92e --- /dev/null +++ b/src/mistralai/client/models/transcriptionresponse.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class TranscriptionResponseTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + + +class TranscriptionResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["segments"] + nullable_fields = ["language"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py new file mode 100644 index 00000000..c89d84fc --- /dev/null +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
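+
+Parsing sketch (editor's illustration, not part of the generated output):
+the model is configured with extra="allow", so unrecognized keys survive
+validation and remain reachable through `additional_properties`:
+
+    from mistralai.client.models.transcriptionsegmentchunk import (
+        TranscriptionSegmentChunk,
+    )
+
+    # "foo" is a hypothetical extra key, used only to show the behavior.
+    chunk = TranscriptionSegmentChunk.model_validate(
+        {"text": "hello", "start": 0.0, "end": 1.2, "foo": "bar"}
+    )
+    print(chunk.additional_properties)  # {'foo': 'bar'}
+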
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +Type = Literal["transcription_segment",] + + +class TranscriptionSegmentChunkTypedDict(TypedDict): + text: str + start: float + end: float + score: NotRequired[Nullable[float]] + speaker_id: NotRequired[Nullable[str]] + type: NotRequired[Type] + + +class TranscriptionSegmentChunk(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + score: OptionalNullable[float] = UNSET + + speaker_id: OptionalNullable[str] = UNSET + + type: Optional[Type] = "transcription_segment" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["score", "speaker_id", "type"] + nullable_fields = ["score", "speaker_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py new file mode 100644 index 00000000..add17f56 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamdone.py @@ -0,0 +1,85 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamDoneType = Literal["transcription.done",] + + +class TranscriptionStreamDoneTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + type: NotRequired[TranscriptionStreamDoneType] + + +class TranscriptionStreamDone(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + type: Optional[TranscriptionStreamDoneType] = "transcription.done" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["segments", "type"] + nullable_fields = ["language"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py new file mode 100644 index 00000000..caaf943a --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamevents.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
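+
+Deserialization sketch (editor's illustration, not part of the generated
+output): `data` is a discriminated union keyed on the payload's `type`
+field, so a raw event dict resolves to the matching variant class:
+
+    from mistralai.client.models.transcriptionstreamevents import (
+        TranscriptionStreamEvents,
+    )
+
+    event = TranscriptionStreamEvents.model_validate(
+        {
+            "event": "transcription.text.delta",
+            "data": {"type": "transcription.text.delta", "text": "Hel"},
+        }
+    )
+    assert event.data.text == "Hel"
+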
DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneTypedDict, +) +from .transcriptionstreameventtypes import TranscriptionStreamEventTypes +from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageTypedDict, +) +from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaTypedDict, +) +from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +TranscriptionStreamEventsDataTypedDict = TypeAliasType( + "TranscriptionStreamEventsDataTypedDict", + Union[ + TranscriptionStreamTextDeltaTypedDict, + TranscriptionStreamLanguageTypedDict, + TranscriptionStreamSegmentDeltaTypedDict, + TranscriptionStreamDoneTypedDict, + ], +) + + +TranscriptionStreamEventsData = Annotated[ + Union[ + Annotated[TranscriptionStreamDone, Tag("transcription.done")], + Annotated[TranscriptionStreamLanguage, Tag("transcription.language")], + Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")], + Annotated[TranscriptionStreamTextDelta, Tag("transcription.text.delta")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class TranscriptionStreamEventsTypedDict(TypedDict): + event: TranscriptionStreamEventTypes + data: TranscriptionStreamEventsDataTypedDict + + +class TranscriptionStreamEvents(BaseModel): + event: TranscriptionStreamEventTypes + + data: TranscriptionStreamEventsData diff --git a/src/mistralai/client/models/transcriptionstreameventtypes.py b/src/mistralai/client/models/transcriptionstreameventtypes.py new file mode 100644 index 00000000..4a910f0a --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreameventtypes.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +TranscriptionStreamEventTypes = Literal[ + "transcription.language", + "transcription.segment", + "transcription.text.delta", + "transcription.done", +] diff --git a/src/mistralai/client/models/transcriptionstreamlanguage.py b/src/mistralai/client/models/transcriptionstreamlanguage.py new file mode 100644 index 00000000..b47024ad --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamlanguage.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamLanguageType = Literal["transcription.language",] + + +class TranscriptionStreamLanguageTypedDict(TypedDict): + audio_language: str + type: NotRequired[TranscriptionStreamLanguageType] + + +class TranscriptionStreamLanguage(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + audio_language: str + + type: Optional[TranscriptionStreamLanguageType] = "transcription.language" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py new file mode 100644 index 00000000..7cfffb63 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamSegmentDeltaType = Literal["transcription.segment",] + + +class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): + text: str + start: float + end: float + speaker_id: NotRequired[Nullable[str]] + type: NotRequired[TranscriptionStreamSegmentDeltaType] + + +class TranscriptionStreamSegmentDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + speaker_id: OptionalNullable[str] = UNSET + + type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["speaker_id", "type"] + nullable_fields = ["speaker_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py 
b/src/mistralai/client/models/transcriptionstreamtextdelta.py new file mode 100644 index 00000000..ce279cf6 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamTextDeltaType = Literal["transcription.text.delta",] + + +class TranscriptionStreamTextDeltaTypedDict(TypedDict): + text: str + type: NotRequired[TranscriptionStreamTextDeltaType] + + +class TranscriptionStreamTextDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/client/models/unarchiveftmodelout.py b/src/mistralai/client/models/unarchiveftmodelout.py new file mode 100644 index 00000000..511c390b --- /dev/null +++ b/src/mistralai/client/models/unarchiveftmodelout.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +UnarchiveFTModelOutObject = Literal["model",] + + +class UnarchiveFTModelOutTypedDict(TypedDict): + id: str + object: NotRequired[UnarchiveFTModelOutObject] + archived: NotRequired[bool] + + +class UnarchiveFTModelOut(BaseModel): + id: str + + object: Optional[UnarchiveFTModelOutObject] = "model" + + archived: Optional[bool] = False diff --git a/src/mistralai/client/models/updateftmodelin.py b/src/mistralai/client/models/updateftmodelin.py new file mode 100644 index 00000000..0471a154 --- /dev/null +++ b/src/mistralai/client/models/updateftmodelin.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateFTModelInTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateFTModelIn(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/uploadfileout.py b/src/mistralai/client/models/uploadfileout.py new file mode 100644 index 00000000..55e56504 --- /dev/null +++ b/src/mistralai/client/models/uploadfileout.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class UploadFileOutTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class UploadFileOut(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in 
nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/usageinfo.py b/src/mistralai/client/models/usageinfo.py new file mode 100644 index 00000000..f1186d97 --- /dev/null +++ b/src/mistralai/client/models/usageinfo.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] + + +class UsageInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/usermessage.py b/src/mistralai/client/models/usermessage.py new file mode 100644 index 00000000..8d92cea8 --- /dev/null +++ b/src/mistralai/client/models/usermessage.py @@ -0,0 +1,60 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) + + +UserMessageRole = Literal["user",] + + +class UserMessageTypedDict(TypedDict): + content: Nullable[UserMessageContentTypedDict] + role: NotRequired[UserMessageRole] + + +class UserMessage(BaseModel): + content: Nullable[UserMessageContent] + + role: Optional[UserMessageRole] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role"] + nullable_fields = ["content"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/validationerror.py b/src/mistralai/client/models/validationerror.py new file mode 100644 index 00000000..352409be --- /dev/null +++ b/src/mistralai/client/models/validationerror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List, Union +from typing_extensions import TypeAliasType, TypedDict + + +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) + + +Loc = TypeAliasType("Loc", Union[str, int]) + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + + msg: str + + type: str diff --git a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py new file mode 100644 index 00000000..89489fb4 --- /dev/null +++ b/src/mistralai/client/models/wandbintegration.py @@ -0,0 +1,72 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WandbIntegrationType = Literal["wandb",] + + +class WandbIntegrationTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + api_key: str + r"""The WandB API key to use for authentication.""" + type: NotRequired[WandbIntegrationType] + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + + +class WandbIntegration(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + + api_key: str + r"""The WandB API key to use for authentication.""" + + type: Optional[WandbIntegrationType] = "wandb" + + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + + run_name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "name", "run_name"] + nullable_fields = ["name", "run_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/wandbintegrationout.py b/src/mistralai/client/models/wandbintegrationout.py new file mode 100644 index 00000000..a7f9afeb --- /dev/null +++ b/src/mistralai/client/models/wandbintegrationout.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WandbIntegrationOutType = Literal["wandb",] + + +class WandbIntegrationOutTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + type: NotRequired[WandbIntegrationOutType] + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + url: NotRequired[Nullable[str]] + + +class WandbIntegrationOut(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + + type: Optional[WandbIntegrationOutType] = "wandb" + + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" + + run_name: OptionalNullable[str] = UNSET + + url: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "name", "run_name", "url"] + nullable_fields = ["name", "run_name", "url"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py new file mode 100644 index 00000000..8d2d4b5d --- /dev/null +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WebSearchPremiumToolType = Literal["web_search_premium",] + + +class WebSearchPremiumToolTypedDict(TypedDict): + type: NotRequired[WebSearchPremiumToolType] + + +class WebSearchPremiumTool(BaseModel): + type: Optional[WebSearchPremiumToolType] = "web_search_premium" diff --git a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py new file mode 100644 index 00000000..ba4cc09f --- /dev/null +++ b/src/mistralai/client/models/websearchtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WebSearchToolType = Literal["web_search",] + + +class WebSearchToolTypedDict(TypedDict): + type: NotRequired[WebSearchToolType] + + +class WebSearchTool(BaseModel): + type: Optional[WebSearchToolType] = "web_search" diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py new file mode 100644 index 00000000..5ef9da09 --- /dev/null +++ b/src/mistralai/client/models_.py @@ -0,0 +1,1063 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional + + +class Models(BaseSDK): + r"""Model Management API""" + + def list( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModelList: + r"""List Models + + List all models available to the user. 
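+
+        Usage sketch (editor's illustration, not generated output; assumes
+        the package exposes a top-level `Mistral` client that mounts this
+        class at `mistral.models`):
+
+            import os
+            from mistralai.client import Mistral  # assumed import path
+
+            with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as mistral:
+                res = mistral.models.list()
+                for model in res.data or []:
+                    print(model.id)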
+ + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="list_models_v1_models_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModelList, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModelList: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
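+
+        Async usage sketch (editor's illustration, not generated output;
+        same assumed `Mistral` client as the synchronous example). The
+        identical async/await pattern applies to the other *_async methods
+        in this class:
+
+            import asyncio
+            import os
+            from mistralai.client import Mistral  # assumed import path
+
+            async def main() -> None:
+                async with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as mistral:
+                    res = await mistral.models.list_async()
+                    print([m.id for m in res.data or []])
+
+            asyncio.run(main())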
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request_async( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="list_models_v1_models_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModelList, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def retrieve( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + r"""Retrieve Model + + Retrieve information about a model. + + :param model_id: The ID of the model to retrieve. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self._build_request( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + http_res, + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def retrieve_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + r"""Retrieve Model + + Retrieve information about a model. + + :param model_id: The ID of the model to retrieve. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + http_res, + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteModelOut: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteModelOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteModelOut: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteModelOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_ft_model_in=models.UpdateFTModelIn( + name=name, + description=description, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_ft_model_in=models.UpdateFTModelIn( + name=name, + description=description, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def archive( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ArchiveFTModelOut: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ArchiveFTModelOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def archive_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ArchiveFTModelOut: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ArchiveFTModelOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def unarchive( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UnarchiveFTModelOut: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def unarchive_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UnarchiveFTModelOut: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/ocr.py b/src/mistralai/client/ocr.py new file mode 100644 index 00000000..ce7e2126 --- /dev/null +++ b/src/mistralai/client/ocr.py @@ -0,0 +1,303 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + ocrrequest as models_ocrrequest, + responseformat as models_responseformat, +) +from mistralai.client.types import Nullable, OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, List, Mapping, Optional, Union + + +class Ocr(BaseSDK): + r"""OCR API""" + + def process( + self, + *, + model: Nullable[str], + document: Union[ + models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict + ], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
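+
+ Example (an illustrative sketch; the import path, model name, and document URL
+ are placeholders rather than values defined in this module, and it assumes this
+ class is exposed as ``client.ocr``)::
+
+     import os
+     from mistralai import Mistral  # assumed package-root export of the client
+
+     client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
+     response = client.ocr.process(
+         model="mistral-ocr-latest",  # illustrative model name
+         document={
+             "type": "document_url",
+             "document_url": "https://example.com/sample.pdf",  # placeholder URL
+         },
+         include_image_base64=False,
+     )
+     for page in response.pages:
+         print(page.markdown)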
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def process_async( + self, + *, + model: Nullable[str], + document: Union[ + models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict + ], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = UNSET, + 
document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request_async( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url 
or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/py.typed b/src/mistralai/client/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/src/mistralai/client/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py new file mode 100644 index 00000000..99579400 --- /dev/null +++ b/src/mistralai/client/sdk.py @@ -0,0 +1,222 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, get_default_logger +from .utils.retries import RetryConfig +import httpx +import importlib +from mistralai.client import models, utils +from mistralai.client._hooks import SDKHooks +from mistralai.client.types import OptionalNullable, UNSET +import sys +from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import weakref + +if TYPE_CHECKING: + from mistralai.client.agents import Agents + from mistralai.client.audio import Audio + from mistralai.client.batch import Batch + from mistralai.client.beta import Beta + from mistralai.client.chat import Chat + from mistralai.client.classifiers import Classifiers + from mistralai.client.embeddings import Embeddings + from mistralai.client.files import Files + from mistralai.client.fim import Fim + from mistralai.client.fine_tuning import FineTuning + from mistralai.client.models_ import Models + from mistralai.client.ocr import Ocr + + +class Mistral(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + + models: "Models" + r"""Model Management API""" + beta: "Beta" + files: "Files" + r"""Files API""" + fine_tuning: "FineTuning" + batch: "Batch" + chat: "Chat" + r"""Chat Completion API.""" + fim: "Fim" + r"""Fill-in-the-middle API.""" + agents: "Agents" + r"""Agents API.""" + embeddings: "Embeddings" + r"""Embeddings API.""" + classifiers: "Classifiers" + r"""Classifiers API.""" + ocr: "Ocr" + r"""OCR API""" + audio: "Audio" + _sub_sdk_map = { + "models": ("mistralai.client.models_", "Models"), + "beta": ("mistralai.client.beta", "Beta"), + "files": ("mistralai.client.files", "Files"), + "fine_tuning": ("mistralai.client.fine_tuning", "FineTuning"), + "batch": ("mistralai.client.batch", "Batch"), + "chat": ("mistralai.client.chat", "Chat"), + "fim": ("mistralai.client.fim", "Fim"), + "agents": ("mistralai.client.agents", "Agents"), + "embeddings": ("mistralai.client.embeddings", "Embeddings"), + "classifiers": ("mistralai.client.classifiers", "Classifiers"), + "ocr": ("mistralai.client.ocr", "Ocr"), + "audio": ("mistralai.client.audio", "Audio"), + } + + def __init__( + self, + api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. + + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + """ + client_supplied = True + if client is None: + client = httpx.Client(follow_redirects=True) + client_supplied = False + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + async_client_supplied = True + if async_client is None: + async_client = httpx.AsyncClient(follow_redirects=True) + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." 
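+ # ``api_key`` may be given either as a plain string or as a zero-argument
+ # callable; the callable form below is wrapped in a lambda so the credential
+ # is re-resolved lazily instead of being captured once at construction time.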
+ + security: Any = None + if callable(api_key): + # pylint: disable=unnecessary-lambda-assignment + security = lambda: models.Security(api_key=api_key()) + else: + security = models.Security(api_key=api_key) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + client_supplied=client_supplied, + async_client=async_client, + async_client_supplied=async_client_supplied, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + parent_ref=self, + ) + + hooks = SDKHooks() + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + + def dynamic_import(self, modname, retries=3): + for attempt in range(retries): + try: + return importlib.import_module(modname) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = self.dynamic_import(module_path) + klass = getattr(module, class_name) + instance = klass(self.sdk_configuration, parent_ref=self) + setattr(self, name, instance) + return instance + except ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/src/mistralai/client/sdkconfiguration.py b/src/mistralai/client/sdkconfiguration.py new file mode 100644 index 00000000..df50d16f --- /dev/null +++ b/src/mistralai/client/sdkconfiguration.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai.client import models +from mistralai.client.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_EU = "eu" +r"""EU Production server""" +SERVERS = { + SERVER_EU: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool + debug_logger: Logger + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_EU + + if self.server not in SERVERS: + raise ValueError(f'Invalid server "{self.server}"') + + return SERVERS[self.server], {} diff --git a/src/mistralai/client/transcriptions.py b/src/mistralai/client/transcriptions.py new file mode 100644 index 00000000..45501024 --- /dev/null +++ b/src/mistralai/client/transcriptions.py @@ -0,0 +1,481 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + file as models_file, + timestampgranularity as models_timestampgranularity, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class Transcriptions(BaseSDK): + r"""API for audio transcription.""" + + def complete( + self, + *, + model: str, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.TranscriptionResponse: + r"""Create Transcription + + :param model: ID of the model to be used. + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
+ :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequest( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request( + method="POST", + path="/v1/audio/transcriptions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.AudioTranscriptionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.TranscriptionResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.TranscriptionResponse: + r"""Create Transcription + + :param model: 
ID of the model to be used. + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequest( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request_async( + method="POST", + path="/v1/audio/transcriptions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.AudioTranscriptionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.TranscriptionResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + model: str, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + 
] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: + r"""Create Streaming Transcription (SSE) + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequestStream( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request( + method="POST", + path="/v1/audio/transcriptions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.AudioTranscriptionRequestStream, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + client_ref=self, + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + 
self, + *, + model: str, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: + r"""Create Streaming Transcription (SSE) + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequestStream( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request_async( + method="POST", + path="/v1/audio/transcriptions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.AudioTranscriptionRequestStream, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + client_ref=self, + ) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/types/__init__.py b/src/mistralai/client/types/__init__.py new file mode 100644 index 00000000..fc76fe0c --- /dev/null +++ b/src/mistralai/client/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basemodel import ( + BaseModel, + Nullable, + OptionalNullable, + UnrecognizedInt, + UnrecognizedStr, + UNSET, + UNSET_SENTINEL, +) + +__all__ = [ + "BaseModel", + "Nullable", + "OptionalNullable", + "UnrecognizedInt", + "UnrecognizedStr", + "UNSET", + "UNSET_SENTINEL", +] diff --git a/src/mistralai/client/types/basemodel.py b/src/mistralai/client/types/basemodel.py new file mode 100644 index 00000000..a9a640a1 --- /dev/null +++ b/src/mistralai/client/types/basemodel.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from pydantic import ConfigDict, model_serializer +from pydantic import BaseModel as PydanticBaseModel +from pydantic_core import core_schema +from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union +from typing_extensions import TypeAliasType, TypeAlias + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() + ) + + +class Unset(BaseModel): + @model_serializer(mode="plain") + def serialize_model(self): + return UNSET_SENTINEL + + def __bool__(self) -> Literal[False]: + return False + + +UNSET = Unset() +UNSET_SENTINEL = "~?~unset~?~sentinel~?~" + + +T = TypeVar("T") +if TYPE_CHECKING: + Nullable: TypeAlias = Union[T, None] + OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] +else: + Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) + OptionalNullable = TypeAliasType( + "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) + ) + + +class UnrecognizedStr(str): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedStr only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedStr': + if isinstance(v, cls): + return v + return cls(str(v)) + + # Use lax_or_strict_schema where strict always fails + # This forces Pydantic to prefer other union members in strict mode + # and only fall back to UnrecognizedStr in lax mode + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) + + +class UnrecognizedInt(int): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedInt only work in lax mode, not strict mode + # This makes it a "fallback" option 
when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedInt': + if isinstance(v, cls): + return v + return cls(int(v)) + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.int_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) diff --git a/src/mistralai/client/utils/__init__.py b/src/mistralai/client/utils/__init__.py new file mode 100644 index 00000000..f9c2edce --- /dev/null +++ b/src/mistralai/client/utils/__init__.py @@ -0,0 +1,197 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security, get_security_from_env + + from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_default_logger", + "get_discriminator", + "parse_datetime", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "get_security_from_env", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "OpenEnumMeta", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_const", + "validate_float", + "validate_int", + "cast_partial", +] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + 
"get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "get_security_from_env": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "cast_partial": ".values", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/client/utils/annotations.py b/src/mistralai/client/utils/annotations.py new file mode 100644 index 00000000..12e0aa4f --- /dev/null +++ b/src/mistralai/client/utils/annotations.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from enum import Enum +from typing import Any, Optional + + +def get_discriminator(model: Any, fieldname: str, key: str) -> str: + """ + Recursively search for the discriminator attribute in a model. + + Args: + model (Any): The model to search within. + fieldname (str): The name of the field to search for. + key (str): The key to search for in dictionaries. + + Returns: + str: The name of the discriminator attribute. + + Raises: + ValueError: If the discriminator attribute is not found. 
+ """ + upper_fieldname = fieldname.upper() + + def get_field_discriminator(field: Any) -> Optional[str]: + """Search for the discriminator attribute in a given field.""" + + if isinstance(field, dict): + if key in field: + return f"{field[key]}" + + if hasattr(field, fieldname): + attr = getattr(field, fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + if hasattr(field, upper_fieldname): + attr = getattr(field, upper_fieldname) + if isinstance(attr, Enum): + return f"{attr.value}" + return f"{attr}" + + return None + + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None + + if isinstance(model, list): + for field in model: + discriminator = search_nested_discriminator(field) + if discriminator is not None: + return discriminator + + discriminator = search_nested_discriminator(model) + if discriminator is not None: + return discriminator + + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/src/mistralai/client/utils/datetimes.py b/src/mistralai/client/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/src/mistralai/client/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. + Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. + """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/src/mistralai/client/utils/enums.py b/src/mistralai/client/utils/enums.py new file mode 100644 index 00000000..3324e1bc --- /dev/null +++ b/src/mistralai/client/utils/enums.py @@ -0,0 +1,134 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import enum +import sys +from typing import Any + +from pydantic_core import core_schema + + +class OpenEnumMeta(enum.EnumMeta): + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. 
+ # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value + + def __new__(mcs, name, bases, namespace, **kwargs): + cls = super().__new__(mcs, name, bases, namespace, **kwargs) + + # Add __get_pydantic_core_schema__ to make open enums work correctly + # in union discrimination. In strict mode (used by Pydantic for unions), + # only known enum values match. In lax mode, unknown values are accepted. + def __get_pydantic_core_schema__( + cls_inner: Any, _source_type: Any, _handler: Any + ) -> core_schema.CoreSchema: + # Create a validator that only accepts known enum values (for strict mode) + def validate_strict(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + # Use the parent EnumMeta's __call__ which raises ValueError for unknown values + return enum.EnumMeta.__call__(cls_inner, v) + + # Create a lax validator that accepts unknown values + def validate_lax(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + try: + return enum.EnumMeta.__call__(cls_inner, v) + except ValueError: + # Return the raw value for unknown enum values + return v + + # Determine the base type schema (str or int) + is_int_enum = False + for base in cls_inner.__mro__: + if base is int: + is_int_enum = True + break + if base is str: + break + + base_schema = ( + core_schema.int_schema() + if is_int_enum + else core_schema.str_schema() + ) + + # Use lax_or_strict_schema: + # - strict mode: only known enum values match (raises ValueError for unknown) + # - lax mode: accept any value, return enum member or raw value + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_lax)] + ), + strict_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_strict)] + ), + ) + + setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) + return cls diff --git a/src/mistralai/client/utils/eventstreaming.py b/src/mistralai/client/utils/eventstreaming.py new file mode 100644 index 00000000..0969899b --- /dev/null +++ b/src/mistralai/client/utils/eventstreaming.py @@ -0,0 +1,248 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +import re +import json +from typing import ( + Callable, + Generic, + TypeVar, + Optional, + Generator, + AsyncGenerator, + Tuple, +) +import httpx + +T = TypeVar("T") + + +class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] + response: httpx.Response + generator: Generator[T, None, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + client_ref: Optional[object] = None, + ): + self.response = response + self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref + + def __iter__(self): + return self + + def __next__(self): + return next(self.generator) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.response.close() + + +class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] + response: httpx.Response + generator: AsyncGenerator[T, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + client_ref: Optional[object] = None, + ): + self.response = response + self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref + + def __aiter__(self): + return self + + async def __anext__(self): + return await self.generator.__anext__() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.response.aclose() + + +class ServerEvent: + id: Optional[str] = None + event: Optional[str] = None + data: Optional[str] = None + retry: Optional[int] = None + + +MESSAGE_BOUNDARIES = [ + b"\r\n\r\n", + b"\n\n", + b"\r\r", +] + + +async def stream_events_async( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> AsyncGenerator[T, None]: + buffer = bytearray() + position = 0 + discard = False + async for chunk in response.aiter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. + if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + discard = False + for chunk in response.iter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. 
+ if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def _parse_event( + raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None +) -> Tuple[Optional[T], bool]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim <= 0: + continue + + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == "id": + event.id = value + publish = True + elif field == "retry": + event.retry = int(value) if value.isdigit() else None + publish = True + + if sentinel and data == f"{sentinel}\n": + return None, True + + if data: + data = data[:-1] + event.data = data + + data_is_primitive = ( + data.isnumeric() or data == "true" or data == "false" or data == "null" + ) + data_is_json = ( + data.startswith("{") or data.startswith("[") or data.startswith('"') + ) + + if data_is_primitive or data_is_json: + try: + event.data = json.loads(data) + except Exception: + pass + + out = None + if publish: + out = decoder(json.dumps(event.__dict__)) + + return out, False + + +def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/src/mistralai/client/utils/forms.py b/src/mistralai/client/utils/forms.py new file mode 100644 index 00000000..f961e76b --- /dev/null +++ b/src/mistralai/client/utils/forms.py @@ -0,0 +1,234 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ( + FormMetadata, + MultipartFormMetadata, + find_field_metadata, +) +from .values import _is_set, _val_to_string + + +def _populate_form( + field_name: str, + explode: bool, + obj: Any, + delimiter: str, + form: Dict[str, List[str]], +): + if not _is_set(obj): + return form + + if isinstance(obj, BaseModel): + items = [] + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_field_name = obj_field.alias if obj_field.alias is not None else name + if obj_field_name == "": + continue + + val = getattr(obj, name) + if not _is_set(val): + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if not _is_set(value): + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if not _is_set(value): + continue + + if explode: + if not field_name in form: + form[field_name] = [] + form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: + form: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if not _is_set(val): + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias else name + + if field_metadata.file: + if isinstance(val, List): + # Handle array of files + array_field_name = f_name + "[]" + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type 
= _extract_file_properties( + file_obj + ) + + if content_type is not None: + files.append( + (array_field_name, (file_name, content, content_type)) + ) + else: + files.append((array_field_name, (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) + + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) + else: + files.append((f_name, (file_name, content))) + elif field_metadata.json: + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) + else: + if isinstance(val, List): + values = [] + + for value in val: + if not _is_set(value): + continue + values.append(_val_to_string(value)) + + array_field_name = f_name + "[]" + form[array_field_name] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if not _is_set(val): + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + if _is_set(value): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/src/mistralai/client/utils/headers.py b/src/mistralai/client/utils/headers.py new file mode 100644 index 00000000..37864cbb --- /dev/null +++ b/src/mistralai/client/utils/headers.py @@ -0,0 +1,136 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + List, + Optional, +) +from httpx import Headers +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + HeaderMetadata, + find_field_metadata, +) + +from .values import _is_set, _populate_from_globals, _val_to_string + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if _is_set(headers_params): + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if _is_set(gbls): + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(headers_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + f_name = field.alias if field.alias is not None else name + + metadata = find_field_metadata(field, HeaderMetadata) + if metadata is None: + continue + + value, global_found = _populate_from_globals( + name, getattr(headers_params, name), HeaderMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + value = _serialize_header(metadata.explode, value) + + if value != "": + header_values[f_name] = value + + return globals_already_populated + + +def _serialize_header(explode: bool, obj: Any) -> str: + if not _is_set(obj): + return "" + + if isinstance(obj, BaseModel): + items = [] + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) + + if not obj_param_metadata: + continue + + f_name = obj_field.alias if obj_field.alias is not None else name + + val = getattr(obj, name) + if not _is_set(val): + continue + + if explode: + items.append(f"{f_name}={_val_to_string(val)}") + else: + items.append(f_name) + items.append(_val_to_string(val)) + + if len(items) > 0: + return ",".join(items) + elif isinstance(obj, Dict): + items = [] + + for key, value in obj.items(): + if not _is_set(value): + continue + + if explode: + items.append(f"{key}={_val_to_string(value)}") + else: + items.append(key) + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join([str(item) for item in items]) + elif isinstance(obj, List): + items = [] + + for value in obj: + if not _is_set(value): + continue + + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join(items) + elif _is_set(obj): + return f"{_val_to_string(obj)}" + + return "" + + +def get_response_headers(headers: Headers) -> Dict[str, List[str]]: + res: Dict[str, List[str]] = {} + for k, v in headers.items(): + if not k in res: + res[k] = [] + + res[k].append(v) + return res diff --git a/src/mistralai/client/utils/logger.py b/src/mistralai/client/utils/logger.py new file mode 100644 index 00000000..2ef27ee5 --- /dev/null +++ b/src/mistralai/client/utils/logger.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +import logging +import os +from typing import Any, Protocol + + +class Logger(Protocol): + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + + +class NoOpLogger: + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + + +def get_body_content(req: httpx.Request) -> str: + return "" if not hasattr(req, "_content") else str(req.content) + + +def get_default_logger() -> Logger: + if os.getenv("MISTRAL_DEBUG"): + logging.basicConfig(level=logging.DEBUG) + return logging.getLogger("mistralai.client") + return NoOpLogger() diff --git a/src/mistralai/client/utils/metadata.py b/src/mistralai/client/utils/metadata.py new file mode 100644 index 00000000..173b3e5c --- /dev/null +++ b/src/mistralai/client/utils/metadata.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Optional, Type, TypeVar, Union +from dataclasses import dataclass +from pydantic.fields import FieldInfo + + +T = TypeVar("T") + + +@dataclass +class SecurityMetadata: + option: bool = False + scheme: bool = False + scheme_type: Optional[str] = None + sub_type: Optional[str] = None + field_name: Optional[str] = None + + def get_field_name(self, default: str) -> str: + return self.field_name or default + + +@dataclass +class ParamMetadata: + serialization: Optional[str] = None + style: str = "simple" + explode: bool = False + + +@dataclass +class PathParamMetadata(ParamMetadata): + pass + + +@dataclass +class QueryParamMetadata(ParamMetadata): + style: str = "form" + explode: bool = True + + +@dataclass +class HeaderMetadata(ParamMetadata): + pass + + +@dataclass +class RequestMetadata: + media_type: str = "application/octet-stream" + + +@dataclass +class MultipartFormMetadata: + file: bool = False + content: bool = False + json: bool = False + + +@dataclass +class FormMetadata: + json: bool = False + style: str = "form" + explode: bool = True + + +class FieldMetadata: + security: Optional[SecurityMetadata] = None + path: Optional[PathParamMetadata] = None + query: Optional[QueryParamMetadata] = None + header: Optional[HeaderMetadata] = None + request: Optional[RequestMetadata] = None + form: Optional[FormMetadata] = None + multipart: Optional[MultipartFormMetadata] = None + + def __init__( + self, + security: Optional[SecurityMetadata] = None, + path: Optional[Union[PathParamMetadata, bool]] = None, + query: Optional[Union[QueryParamMetadata, bool]] = None, + header: Optional[Union[HeaderMetadata, bool]] = None, + request: Optional[Union[RequestMetadata, bool]] = None, + form: Optional[Union[FormMetadata, bool]] = None, + multipart: Optional[Union[MultipartFormMetadata, bool]] = None, + ): + self.security = security + self.path = PathParamMetadata() if isinstance(path, bool) else path + self.query = QueryParamMetadata() if isinstance(query, bool) else query + self.header = HeaderMetadata() if isinstance(header, bool) else header + self.request = RequestMetadata() if isinstance(request, bool) else request + self.form = FormMetadata() if isinstance(form, bool) else form + self.multipart = ( + MultipartFormMetadata() if isinstance(multipart, bool) else multipart + ) + + +def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = find_metadata(field_info, FieldMetadata) + if not metadata: + return None + + fields = metadata.__dict__ + + for field in fields: + if isinstance(fields[field], metadata_type): + return 
fields[field] + + return None + + +def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = field_info.metadata + if not metadata: + return None + + for md in metadata: + if isinstance(md, metadata_type): + return md + + return None diff --git a/src/mistralai/client/utils/queryparams.py b/src/mistralai/client/utils/queryparams.py new file mode 100644 index 00000000..c04e0db8 --- /dev/null +++ b/src/mistralai/client/utils/queryparams.py @@ -0,0 +1,217 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) + if _is_set(gbls): + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields + param_field_types = get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if _is_set(query_params) else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == "deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if not 
_is_set(obj): + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, params) + + +def _populate_deep_object_query_params_basemodel( + prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if not _is_set(obj) or not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if not _is_set(obj_param_metadata): + continue + + obj_val = getattr(obj, name) + if not _is_set(obj_val): + continue + + if isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for key, val in value.items(): + if not _is_set(val): + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for val in value: + if not _is_set(val): + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/src/mistralai/client/utils/requestbodies.py b/src/mistralai/client/utils/requestbodies.py new file mode 100644 index 00000000..1de32b6d --- /dev/null +++ b/src/mistralai/client/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: Optional[str] = None + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"^multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/src/mistralai/client/utils/retries.py b/src/mistralai/client/utils/retries.py new file mode 100644 index 00000000..88a91b10 --- /dev/null +++ b/src/mistralai/client/utils/retries.py @@ -0,0 +1,281 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import asyncio +import random +import time +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + retry_after: Optional[int] + + def __init__(self, response: httpx.Response): + self.response = response + self.retry_after = _parse_retry_after_header(response) + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. + """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. 
+ """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + time.sleep(sleep) + retries += 1 + + +async def retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except 
Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + await asyncio.sleep(sleep) + retries += 1 diff --git a/src/mistralai/client/utils/security.py b/src/mistralai/client/utils/security.py new file mode 100644 index 00000000..3b8526bf --- /dev/null +++ b/src/mistralai/client/utils/security.py @@ -0,0 +1,192 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import base64 + +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) +import os + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: + if security is not None: + return security + + if not issubclass(security_class, BaseModel): + raise TypeError("security_class must be a pydantic model class") + + security_dict: Any = {} + + if os.getenv("MISTRAL_API_KEY"): + security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") + + return security_class(**security_dict) if security_dict else None + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in 
scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError(f"sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + elif sub_type == "custom": + return + else: + raise ValueError(f"sub type {sub_type} not supported") + else: + raise ValueError(f"scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/client/utils/serializers.py b/src/mistralai/client/utils/serializers.py new file mode 100644 index 00000000..14321eb4 --- /dev/null +++ b/src/mistralai/client/utils/serializers.py @@ -0,0 +1,229 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +import functools +import json +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions +from typing_extensions import get_origin + +import httpx +from pydantic import ConfigDict, create_model +from pydantic_core import from_json + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset + + +def serialize_decimal(as_str: bool): + def serialize(d): + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + return None + if isinstance(d, Unset): + return d + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, (Decimal, Unset)): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + return None + if isinstance(f, Unset): + return f + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, (float, Unset)): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(i): + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + return None + if isinstance(i, Unset): + return i + + if not isinstance(i, int): + raise ValueError("Expected int") + + return str(i) if as_str else i + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, (int, Unset)): + return b + + if not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_const(v): + def validate(c): + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":")) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def is_union(obj: object) -> bool: + """ + Returns 
True if the given object is a typing.Union or typing_extensions.Union. + """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/src/mistralai/client/utils/unmarshal_json_response.py b/src/mistralai/client/utils/unmarshal_json_response.py new file mode 100644 index 00000000..6d43d6e4 --- /dev/null +++ b/src/mistralai/client/utils/unmarshal_json_response.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, Optional, Type, TypeVar, overload + +import httpx + +from .serializers import unmarshal_json +from mistralai.client import models + +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... + + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise models.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e diff --git a/src/mistralai/client/utils/url.py b/src/mistralai/client/utils/url.py new file mode 100644 index 00000000..c78ccbae --- /dev/null +++ b/src/mistralai/client/utils/url.py @@ -0,0 +1,155 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if _is_set(gbls): + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if _is_set(path_params) else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if not _is_set(param): + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: + serialized_params = _get_serialized_params( + param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if not _is_set(pp_val): + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if not _is_set(param[pp_key]): + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if not _is_set(param_field_val): + continue + if param_metadata.explode: + pp_vals.append( + f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + elif _is_set(param): + path_param_values[f_name] = _val_to_string(param) + + return 
globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and input_string.endswith(suffix): + return input_string[: -len(suffix)] + return input_string diff --git a/src/mistralai/client/utils/values.py b/src/mistralai/client/utils/values.py new file mode 100644 index 00000000..dae01a44 --- /dev/null +++ b/src/mistralai/client/utils/values.py @@ -0,0 +1,137 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +from functools import partial +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from ..types.basemodel import Unset + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + +def cast_partial(typ): + return partial(cast, typ) + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: + if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return 
str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params + + +def _is_set(value: Any) -> bool: + return value is not None and not isinstance(value, Unset) diff --git a/uv.lock b/uv.lock index fe22e76a..4b1890b2 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "1.12.0" +version = "2.0.0a1" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, From 233c672feb2c34145db71eac13c6923a5d76dd04 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:32:00 +0100 Subject: [PATCH 09/42] fix: migrate custom hooks to client/_hooks/ - Move custom_user_agent.py, deprecation_warning.py, tracing.py - Update tracing.py to use absolute import for mistralai.extra - Update registration.py to register all custom hooks --- src/mistralai/_hooks/registration.py | 22 ------------------- .../{ => client}/_hooks/custom_user_agent.py | 0 .../_hooks/deprecation_warning.py | 0 src/mistralai/client/_hooks/registration.py | 13 +++++++++-- src/mistralai/{ => client}/_hooks/tracing.py | 2 +- 5 files changed, 12 insertions(+), 25 deletions(-) delete mode 100644 src/mistralai/_hooks/registration.py rename src/mistralai/{ => client}/_hooks/custom_user_agent.py (100%) rename src/mistralai/{ => client}/_hooks/deprecation_warning.py (100%) rename src/mistralai/{ => client}/_hooks/tracing.py (98%) diff --git a/src/mistralai/_hooks/registration.py b/src/mistralai/_hooks/registration.py deleted file mode 100644 index 58bebab0..00000000 --- a/src/mistralai/_hooks/registration.py +++ /dev/null @@ -1,22 +0,0 @@ -from .custom_user_agent import CustomUserAgentHook -from .deprecation_warning import DeprecationWarningHook -from .tracing import TracingHook -from .types import Hooks - -# This file is only ever generated once on the first generation and then is free to be modified. -# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them -# in this file or in separate files in the hooks folder. 
- - -def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument - """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook - with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - tracing_hook = TracingHook() - hooks.register_before_request_hook(CustomUserAgentHook()) - hooks.register_after_success_hook(DeprecationWarningHook()) - hooks.register_after_success_hook(tracing_hook) - hooks.register_before_request_hook(tracing_hook) - hooks.register_after_error_hook(tracing_hook) diff --git a/src/mistralai/_hooks/custom_user_agent.py b/src/mistralai/client/_hooks/custom_user_agent.py similarity index 100% rename from src/mistralai/_hooks/custom_user_agent.py rename to src/mistralai/client/_hooks/custom_user_agent.py diff --git a/src/mistralai/_hooks/deprecation_warning.py b/src/mistralai/client/_hooks/deprecation_warning.py similarity index 100% rename from src/mistralai/_hooks/deprecation_warning.py rename to src/mistralai/client/_hooks/deprecation_warning.py diff --git a/src/mistralai/client/_hooks/registration.py b/src/mistralai/client/_hooks/registration.py index cab47787..58bebab0 100644 --- a/src/mistralai/client/_hooks/registration.py +++ b/src/mistralai/client/_hooks/registration.py @@ -1,6 +1,8 @@ +from .custom_user_agent import CustomUserAgentHook +from .deprecation_warning import DeprecationWarningHook +from .tracing import TracingHook from .types import Hooks - # This file is only ever generated once on the first generation and then is free to be modified. # Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them # in this file or in separate files in the hooks folder. 
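For context on the hook contract described in the comment above, here is a minimal sketch of a custom hook under the new `mistralai.client` namespace. It assumes the `BeforeRequestHook` and `BeforeRequestContext` definitions that Speakeasy generates alongside `Hooks` in `types.py`; the `RequestIdHook` name and its header are illustrative, not part of this patch.

```python
# Minimal sketch of a custom before-request hook (illustrative only).
# BeforeRequestHook and BeforeRequestContext are assumed to exist in the
# generated types.py that also defines Hooks; RequestIdHook is hypothetical.
import uuid
from typing import Union

import httpx

from .types import BeforeRequestContext, BeforeRequestHook, Hooks


class RequestIdHook(BeforeRequestHook):
    def before_request(
        self, hook_ctx: BeforeRequestContext, request: httpx.Request
    ) -> Union[httpx.Request, Exception]:
        # Tag every outgoing request so it can be correlated in logs.
        request.headers["X-Request-Id"] = str(uuid.uuid4())
        return request


def init_hooks(hooks: Hooks):
    hooks.register_before_request_hook(RequestIdHook())
```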
@@ -10,4 +12,11 @@ def init_hooks(hooks: Hooks): # pylint: disable=unused-argument """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance""" + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + tracing_hook = TracingHook() + hooks.register_before_request_hook(CustomUserAgentHook()) + hooks.register_after_success_hook(DeprecationWarningHook()) + hooks.register_after_success_hook(tracing_hook) + hooks.register_before_request_hook(tracing_hook) + hooks.register_after_error_hook(tracing_hook) diff --git a/src/mistralai/_hooks/tracing.py b/src/mistralai/client/_hooks/tracing.py similarity index 98% rename from src/mistralai/_hooks/tracing.py rename to src/mistralai/client/_hooks/tracing.py index fc4656fd..b353d9bd 100644 --- a/src/mistralai/_hooks/tracing.py +++ b/src/mistralai/client/_hooks/tracing.py @@ -4,7 +4,7 @@ import httpx from opentelemetry.trace import Span -from ..extra.observability.otel import ( +from mistralai.extra.observability.otel import ( get_or_create_otel_tracer, get_response_and_error, get_traced_request_and_span, From 20305b37e6015172ba1bdbf1a9a37d41454ba614 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:34:27 +0100 Subject: [PATCH 10/42] fix: update extra/ imports for new namespace Update all imports in src/mistralai/extra/ from: - mistralai.models -> mistralai.client.models - mistralai.types -> mistralai.client.types - mistralai.utils -> mistralai.client.utils - mistralai.sdkconfiguration -> mistralai.client.sdkconfiguration --- src/mistralai/extra/exceptions.py | 2 +- src/mistralai/extra/mcp/auth.py | 2 +- src/mistralai/extra/mcp/base.py | 2 +- src/mistralai/extra/mcp/sse.py | 2 +- src/mistralai/extra/realtime/__init__.py | 2 +- src/mistralai/extra/realtime/connection.py | 2 +- src/mistralai/extra/realtime/transcription.py | 8 ++++---- src/mistralai/extra/run/context.py | 6 +++--- src/mistralai/extra/run/result.py | 4 ++-- src/mistralai/extra/run/tools.py | 2 +- src/mistralai/extra/struct_chat.py | 2 +- src/mistralai/extra/tests/test_struct_chat.py | 2 +- src/mistralai/extra/tests/test_utils.py | 4 ++-- src/mistralai/extra/utils/response_format.py | 2 +- 14 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/mistralai/extra/exceptions.py b/src/mistralai/extra/exceptions.py index ee107698..d2cd3e79 100644 --- a/src/mistralai/extra/exceptions.py +++ b/src/mistralai/extra/exceptions.py @@ -1,7 +1,7 @@ from typing import Optional, TYPE_CHECKING if TYPE_CHECKING: - from mistralai.models import RealtimeTranscriptionError + from mistralai.client.models import RealtimeTranscriptionError class MistralClientException(Exception): diff --git a/src/mistralai/extra/mcp/auth.py b/src/mistralai/extra/mcp/auth.py index f2b2db8a..8a61ddab 100644 --- a/src/mistralai/extra/mcp/auth.py +++ b/src/mistralai/extra/mcp/auth.py @@ -4,7 +4,7 @@ from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase from authlib.oauth2.rfc8414 import AuthorizationServerMetadata -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel logger = logging.getLogger(__name__) diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py index bbda67d5..1048c54f 100644 --- a/src/mistralai/extra/mcp/base.py +++ b/src/mistralai/extra/mcp/base.py @@ -11,7 +11,7 @@ ) 
from mistralai.extra.exceptions import MCPException -from mistralai.models import ( +from mistralai.client.models import ( FunctionTool, Function, SystemMessageTypedDict, diff --git a/src/mistralai/extra/mcp/sse.py b/src/mistralai/extra/mcp/sse.py index ba49fd1a..b4929c54 100644 --- a/src/mistralai/extra/mcp/sse.py +++ b/src/mistralai/extra/mcp/sse.py @@ -16,7 +16,7 @@ ) from mistralai.extra.mcp.auth import OAuthParams, AsyncOAuth2Client -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel logger = logging.getLogger(__name__) diff --git a/src/mistralai/extra/realtime/__init__.py b/src/mistralai/extra/realtime/__init__.py index 85bf1d88..7b80e045 100644 --- a/src/mistralai/extra/realtime/__init__.py +++ b/src/mistralai/extra/realtime/__init__.py @@ -1,4 +1,4 @@ -from mistralai.models import ( +from mistralai.client.models import ( AudioEncoding, AudioFormat, RealtimeTranscriptionError, diff --git a/src/mistralai/extra/realtime/connection.py b/src/mistralai/extra/realtime/connection.py index 042854ab..ffbbc735 100644 --- a/src/mistralai/extra/realtime/connection.py +++ b/src/mistralai/extra/realtime/connection.py @@ -16,7 +16,7 @@ "Install with: pip install 'mistralai[realtime]'" ) from exc -from mistralai.models import ( +from mistralai.client.models import ( AudioFormat, RealtimeTranscriptionError, RealtimeTranscriptionSession, diff --git a/src/mistralai/extra/realtime/transcription.py b/src/mistralai/extra/realtime/transcription.py index de117645..655fd9c1 100644 --- a/src/mistralai/extra/realtime/transcription.py +++ b/src/mistralai/extra/realtime/transcription.py @@ -17,15 +17,15 @@ "Install with: pip install 'mistralai[realtime]'" ) from exc -from mistralai import models, utils -from mistralai.models import ( +from mistralai.client import models, utils +from mistralai.client.models import ( AudioFormat, RealtimeTranscriptionError, RealtimeTranscriptionSession, RealtimeTranscriptionSessionCreated, ) -from mistralai.sdkconfiguration import SDKConfiguration -from mistralai.utils import generate_url, get_security, get_security_from_env +from mistralai.client.sdkconfiguration import SDKConfiguration +from mistralai.client.utils import generate_url, get_security, get_security_from_env from ..exceptions import RealtimeTranscriptionException, RealtimeTranscriptionWSError from .connection import ( diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py index 0d78352a..8e570e41 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -21,7 +21,7 @@ create_function_result, create_tool_call, ) -from mistralai.models import ( +from mistralai.client.models import ( CompletionArgs, CompletionArgsTypedDict, ConversationInputs, @@ -35,10 +35,10 @@ Tools, ToolsTypedDict, ) -from mistralai.types.basemodel import BaseModel, OptionalNullable, UNSET +from mistralai.client.types.basemodel import BaseModel, OptionalNullable, UNSET if typing.TYPE_CHECKING: - from mistralai import Beta, OptionalNullable + from mistralai.client import Beta, OptionalNullable logger = getLogger(__name__) diff --git a/src/mistralai/extra/run/result.py b/src/mistralai/extra/run/result.py index 0af48ee7..6e2bcc8a 100644 --- a/src/mistralai/extra/run/result.py +++ b/src/mistralai/extra/run/result.py @@ -7,7 +7,7 @@ from pydantic import BaseModel, Discriminator, Tag from mistralai.extra.utils.response_format import pydantic_model_from_json -from mistralai.models import ( +from mistralai.client.models import ( FunctionResultEntry, 
FunctionCallEntry, MessageOutputEntry, @@ -34,7 +34,7 @@ ToolReferenceChunk, FunctionCallEntryArguments, ) -from mistralai.utils import get_discriminator +from mistralai.client.utils import get_discriminator RunOutputEntries = ( MessageOutputEntry diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py index b117fdea..94ef2852 100644 --- a/src/mistralai/extra/run/tools.py +++ b/src/mistralai/extra/run/tools.py @@ -21,7 +21,7 @@ from mistralai.extra.mcp.base import MCPClientProtocol from mistralai.extra.observability.otel import GenAISpanEnum, MistralAIAttributes, set_available_attributes from mistralai.extra.run.result import RunOutputEntries -from mistralai.models import ( +from mistralai.client.models import ( FunctionResultEntry, FunctionTool, Function, diff --git a/src/mistralai/extra/struct_chat.py b/src/mistralai/extra/struct_chat.py index 773cbb6c..d3fd3f5a 100644 --- a/src/mistralai/extra/struct_chat.py +++ b/src/mistralai/extra/struct_chat.py @@ -1,7 +1,7 @@ import json from typing import Generic -from ..models import AssistantMessage, ChatCompletionChoice, ChatCompletionResponse +from mistralai.client.models import AssistantMessage, ChatCompletionChoice, ChatCompletionResponse from .utils.response_format import CustomPydanticModel, pydantic_model_from_json diff --git a/src/mistralai/extra/tests/test_struct_chat.py b/src/mistralai/extra/tests/test_struct_chat.py index dd529ba5..7b79bf77 100644 --- a/src/mistralai/extra/tests/test_struct_chat.py +++ b/src/mistralai/extra/tests/test_struct_chat.py @@ -5,7 +5,7 @@ ParsedChatCompletionChoice, ParsedAssistantMessage, ) -from ...models import ( +from mistralai.client.models import ( ChatCompletionResponse, UsageInfo, ChatCompletionChoice, diff --git a/src/mistralai/extra/tests/test_utils.py b/src/mistralai/extra/tests/test_utils.py index 41fa53e3..35523fbd 100644 --- a/src/mistralai/extra/tests/test_utils.py +++ b/src/mistralai/extra/tests/test_utils.py @@ -5,8 +5,8 @@ ) from pydantic import BaseModel, ValidationError -from ...models import ResponseFormat, JSONSchema -from ...types.basemodel import Unset +from mistralai.client.models import ResponseFormat, JSONSchema +from mistralai.client.types.basemodel import Unset import unittest diff --git a/src/mistralai/extra/utils/response_format.py b/src/mistralai/extra/utils/response_format.py index 10bff89f..2378b562 100644 --- a/src/mistralai/extra/utils/response_format.py +++ b/src/mistralai/extra/utils/response_format.py @@ -1,7 +1,7 @@ from typing import Any, TypeVar from pydantic import BaseModel -from ...models import JSONSchema, ResponseFormat +from mistralai.client.models import JSONSchema, ResponseFormat from ._pydantic_helper import rec_strict_json_schema CustomPydanticModel = TypeVar("CustomPydanticModel", bound=BaseModel) From cae72da0ae1b1c9ce64ae79e3399624df13f602a Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:35:29 +0100 Subject: [PATCH 11/42] docs: update example imports for new namespace Update all examples to use new import paths: - from mistralai import -> from mistralai.client import - from mistralai.models -> from mistralai.client.models - from mistralai.types -> from mistralai.client.types --- examples/mistral/agents/async_agents_no_streaming.py | 4 ++-- examples/mistral/agents/async_conversation_agent.py | 2 +- examples/mistral/agents/async_conversation_run.py | 4 ++-- examples/mistral/agents/async_conversation_run_mcp.py | 4 ++-- .../agents/async_conversation_run_mcp_remote.py | 2 +- 
.../agents/async_conversation_run_mcp_remote_auth.py | 2 +- .../mistral/agents/async_conversation_run_stream.py | 4 ++-- .../mistral/agents/async_multi_turn_conversation.py | 2 +- .../audio/async_realtime_transcription_microphone.py | 4 ++-- .../audio/async_realtime_transcription_stream.py | 4 ++-- examples/mistral/audio/chat_base64.py | 4 ++-- examples/mistral/audio/chat_no_streaming.py | 4 ++-- examples/mistral/audio/chat_streaming.py | 4 ++-- examples/mistral/audio/transcription_async.py | 2 +- examples/mistral/audio/transcription_diarize_async.py | 2 +- examples/mistral/audio/transcription_segments.py | 2 +- .../mistral/audio/transcription_segments_stream.py | 2 +- examples/mistral/audio/transcription_stream_async.py | 2 +- examples/mistral/audio/transcription_url.py | 2 +- examples/mistral/chat/async_chat_no_streaming.py | 4 ++-- .../mistral/chat/async_chat_with_image_no_streaming.py | 4 ++-- examples/mistral/chat/async_chat_with_streaming.py | 4 ++-- examples/mistral/chat/async_structured_outputs.py | 2 +- examples/mistral/chat/chat_no_streaming.py | 4 ++-- examples/mistral/chat/chat_prediction.py | 4 ++-- examples/mistral/chat/chat_with_streaming.py | 4 ++-- examples/mistral/chat/chatbot_with_streaming.py | 4 ++-- examples/mistral/chat/completion_with_streaming.py | 2 +- examples/mistral/chat/function_calling.py | 10 +++++----- examples/mistral/chat/json_format.py | 4 ++-- examples/mistral/chat/structured_outputs.py | 2 +- .../chat/structured_outputs_with_json_schema.py | 2 +- .../mistral/chat/structured_outputs_with_pydantic.py | 2 +- examples/mistral/classifier/async_classifier.py | 2 +- examples/mistral/embeddings/async_embeddings.py | 2 +- examples/mistral/embeddings/embeddings.py | 2 +- examples/mistral/fim/async_code_completion.py | 2 +- examples/mistral/fim/code_completion.py | 2 +- .../jobs/async_batch_job_chat_completion_inline.py | 2 +- examples/mistral/jobs/async_files.py | 4 ++-- examples/mistral/jobs/async_jobs.py | 4 ++-- examples/mistral/jobs/async_jobs_chat.py | 4 ++-- .../mistral/jobs/async_jobs_ocr_batch_annotation.py | 4 ++-- examples/mistral/jobs/dry_run_job.py | 4 ++-- examples/mistral/jobs/files.py | 4 ++-- examples/mistral/jobs/jobs.py | 4 ++-- examples/mistral/libraries/async_libraries.py | 4 ++-- examples/mistral/libraries/libraries.py | 4 ++-- examples/mistral/models/async_list_models.py | 2 +- examples/mistral/models/list_models.py | 2 +- examples/mistral/ocr/ocr_process_from_file.py | 2 +- examples/mistral/ocr/ocr_process_from_url.py | 2 +- 52 files changed, 82 insertions(+), 82 deletions(-) diff --git a/examples/mistral/agents/async_agents_no_streaming.py b/examples/mistral/agents/async_agents_no_streaming.py index 45f300ac..6041cad3 100755 --- a/examples/mistral/agents/async_agents_no_streaming.py +++ b/examples/mistral/agents/async_agents_no_streaming.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/agents/async_conversation_agent.py b/examples/mistral/agents/async_conversation_agent.py index 54f002ac..981f13c7 100644 --- a/examples/mistral/agents/async_conversation_agent.py +++ b/examples/mistral/agents/async_conversation_agent.py @@ -2,7 +2,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral MODEL = "mistral-medium-latest" diff --git a/examples/mistral/agents/async_conversation_run.py 
b/examples/mistral/agents/async_conversation_run.py index 27f9c870..10c81d77 100644 --- a/examples/mistral/agents/async_conversation_run.py +++ b/examples/mistral/agents/async_conversation_run.py @@ -2,9 +2,9 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel MODEL = "mistral-medium-2505" diff --git a/examples/mistral/agents/async_conversation_run_mcp.py b/examples/mistral/agents/async_conversation_run_mcp.py index 0e373715..52550004 100644 --- a/examples/mistral/agents/async_conversation_run_mcp.py +++ b/examples/mistral/agents/async_conversation_run_mcp.py @@ -3,7 +3,7 @@ import os import random -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mcp import StdioServerParameters from mistralai.extra.mcp.stdio import ( @@ -11,7 +11,7 @@ ) from pathlib import Path -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel cwd = Path(__file__).parent MODEL = "mistral-medium-latest" diff --git a/examples/mistral/agents/async_conversation_run_mcp_remote.py b/examples/mistral/agents/async_conversation_run_mcp_remote.py index 7b2f46a6..d6fac492 100644 --- a/examples/mistral/agents/async_conversation_run_mcp_remote.py +++ b/examples/mistral/agents/async_conversation_run_mcp_remote.py @@ -2,7 +2,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mistralai.extra.mcp.sse import ( diff --git a/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py b/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py index f69d8096..c255895e 100644 --- a/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py +++ b/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py @@ -5,7 +5,7 @@ import threading import webbrowser -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mistralai.extra.mcp.sse import ( diff --git a/examples/mistral/agents/async_conversation_run_stream.py b/examples/mistral/agents/async_conversation_run_stream.py index 1e6ad87b..431b9cc9 100644 --- a/examples/mistral/agents/async_conversation_run_stream.py +++ b/examples/mistral/agents/async_conversation_run_stream.py @@ -3,14 +3,14 @@ import os import random -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mcp import StdioServerParameters from mistralai.extra.mcp.stdio import MCPClientSTDIO from pathlib import Path from mistralai.extra.run.result import RunResult -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel cwd = Path(__file__).parent MODEL = "mistral-medium-latest" diff --git a/examples/mistral/agents/async_multi_turn_conversation.py b/examples/mistral/agents/async_multi_turn_conversation.py index d24443c0..26c2378f 100644 --- a/examples/mistral/agents/async_multi_turn_conversation.py +++ b/examples/mistral/agents/async_multi_turn_conversation.py @@ -1,5 +1,5 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext import logging diff --git a/examples/mistral/audio/async_realtime_transcription_microphone.py 
b/examples/mistral/audio/async_realtime_transcription_microphone.py index 748dbcaf..191a21e4 100644 --- a/examples/mistral/audio/async_realtime_transcription_microphone.py +++ b/examples/mistral/audio/async_realtime_transcription_microphone.py @@ -23,9 +23,9 @@ from rich.panel import Panel from rich.text import Text -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.realtime import UnknownRealtimeEvent -from mistralai.models import ( +from mistralai.client.models import ( AudioFormat, RealtimeTranscriptionError, RealtimeTranscriptionSessionCreated, diff --git a/examples/mistral/audio/async_realtime_transcription_stream.py b/examples/mistral/audio/async_realtime_transcription_stream.py index 6dbcd103..0a0ac609 100644 --- a/examples/mistral/audio/async_realtime_transcription_stream.py +++ b/examples/mistral/audio/async_realtime_transcription_stream.py @@ -9,9 +9,9 @@ from pathlib import Path from typing import AsyncIterator -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.realtime.connection import UnknownRealtimeEvent -from mistralai.models import ( +from mistralai.client.models import ( AudioFormat, RealtimeTranscriptionError, TranscriptionStreamDone, diff --git a/examples/mistral/audio/chat_base64.py b/examples/mistral/audio/chat_base64.py index 8468fbfb..d6afb2ab 100755 --- a/examples/mistral/audio/chat_base64.py +++ b/examples/mistral/audio/chat_base64.py @@ -2,8 +2,8 @@ import base64 import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/audio/chat_no_streaming.py b/examples/mistral/audio/chat_no_streaming.py index f10240bd..87237ec0 100755 --- a/examples/mistral/audio/chat_no_streaming.py +++ b/examples/mistral/audio/chat_no_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/audio/chat_streaming.py b/examples/mistral/audio/chat_streaming.py index f9c913a0..a9ab2323 100755 --- a/examples/mistral/audio/chat_streaming.py +++ b/examples/mistral/audio/chat_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral, File -from mistralai.models import UserMessage +from mistralai.client import Mistral, File +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/audio/transcription_async.py b/examples/mistral/audio/transcription_async.py index 9092fc03..c8fd9ae6 100644 --- a/examples/mistral/audio/transcription_async.py +++ b/examples/mistral/audio/transcription_async.py @@ -2,7 +2,7 @@ import os import asyncio -from mistralai import Mistral, File +from mistralai.client import Mistral, File async def main(): diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py index 26754837..cbdf3512 100644 --- a/examples/mistral/audio/transcription_diarize_async.py +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -3,7 +3,7 @@ import os import asyncio import pathlib -from mistralai import Mistral, File +from mistralai.client import Mistral, File fixture_dir = pathlib.Path(__file__).parents[2] / "fixtures" diff --git a/examples/mistral/audio/transcription_segments.py b/examples/mistral/audio/transcription_segments.py index 626b83e2..3d691711 100644 --- 
a/examples/mistral/audio/transcription_segments.py +++ b/examples/mistral/audio/transcription_segments.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/audio/transcription_segments_stream.py b/examples/mistral/audio/transcription_segments_stream.py index bedfbd40..32edf951 100644 --- a/examples/mistral/audio/transcription_segments_stream.py +++ b/examples/mistral/audio/transcription_segments_stream.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/audio/transcription_stream_async.py b/examples/mistral/audio/transcription_stream_async.py index b7f553b3..6e64dcf7 100644 --- a/examples/mistral/audio/transcription_stream_async.py +++ b/examples/mistral/audio/transcription_stream_async.py @@ -2,7 +2,7 @@ import asyncio import os -from mistralai import Mistral, File +from mistralai.client import Mistral, File async def main(): diff --git a/examples/mistral/audio/transcription_url.py b/examples/mistral/audio/transcription_url.py index b194b50c..907f830d 100644 --- a/examples/mistral/audio/transcription_url.py +++ b/examples/mistral/audio/transcription_url.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/chat/async_chat_no_streaming.py b/examples/mistral/chat/async_chat_no_streaming.py index 9448f09d..ad45d0fd 100755 --- a/examples/mistral/chat/async_chat_no_streaming.py +++ b/examples/mistral/chat/async_chat_no_streaming.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_chat_with_image_no_streaming.py b/examples/mistral/chat/async_chat_with_image_no_streaming.py index efadff89..5d2cbdaa 100755 --- a/examples/mistral/chat/async_chat_with_image_no_streaming.py +++ b/examples/mistral/chat/async_chat_with_image_no_streaming.py @@ -4,8 +4,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_chat_with_streaming.py b/examples/mistral/chat/async_chat_with_streaming.py index 1ef500ae..1642ea41 100755 --- a/examples/mistral/chat/async_chat_with_streaming.py +++ b/examples/mistral/chat/async_chat_with_streaming.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_structured_outputs.py b/examples/mistral/chat/async_structured_outputs.py index a512d38f..09ed5737 100644 --- a/examples/mistral/chat/async_structured_outputs.py +++ b/examples/mistral/chat/async_structured_outputs.py @@ -4,7 +4,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/chat/chat_no_streaming.py b/examples/mistral/chat/chat_no_streaming.py index 72506dd9..5f6968ca 100755 --- a/examples/mistral/chat/chat_no_streaming.py +++ b/examples/mistral/chat/chat_no_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from 
mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chat_prediction.py b/examples/mistral/chat/chat_prediction.py index 1ff87e3f..88c57e77 100644 --- a/examples/mistral/chat/chat_prediction.py +++ b/examples/mistral/chat/chat_prediction.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chat_with_streaming.py b/examples/mistral/chat/chat_with_streaming.py index 66b167f1..94a3e29c 100755 --- a/examples/mistral/chat/chat_with_streaming.py +++ b/examples/mistral/chat/chat_with_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chatbot_with_streaming.py b/examples/mistral/chat/chatbot_with_streaming.py index 8d47deb5..bbc3881f 100755 --- a/examples/mistral/chat/chatbot_with_streaming.py +++ b/examples/mistral/chat/chatbot_with_streaming.py @@ -8,8 +8,8 @@ import readline import sys -from mistralai import Mistral -from mistralai.models import AssistantMessage, SystemMessage, UserMessage +from mistralai.client import Mistral +from mistralai.client.models import AssistantMessage, SystemMessage, UserMessage MODEL_LIST = [ "mistral-small-latest", diff --git a/examples/mistral/chat/completion_with_streaming.py b/examples/mistral/chat/completion_with_streaming.py index 5bee2033..399e8638 100644 --- a/examples/mistral/chat/completion_with_streaming.py +++ b/examples/mistral/chat/completion_with_streaming.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/chat/function_calling.py b/examples/mistral/chat/function_calling.py index aba7d671..f0eb9e70 100644 --- a/examples/mistral/chat/function_calling.py +++ b/examples/mistral/chat/function_calling.py @@ -3,11 +3,11 @@ import os from typing import Dict, List -from mistralai import Mistral -from mistralai.models.assistantmessage import AssistantMessage -from mistralai.models.function import Function -from mistralai.models.toolmessage import ToolMessage -from mistralai.models.usermessage import UserMessage +from mistralai.client import Mistral +from mistralai.client.models.assistantmessage import AssistantMessage +from mistralai.client.models.function import Function +from mistralai.client.models.toolmessage import ToolMessage +from mistralai.client.models.usermessage import UserMessage # Assuming we have the following data data = { diff --git a/examples/mistral/chat/json_format.py b/examples/mistral/chat/json_format.py index 23c38680..8fa1416a 100755 --- a/examples/mistral/chat/json_format.py +++ b/examples/mistral/chat/json_format.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/structured_outputs.py b/examples/mistral/chat/structured_outputs.py index bc4a5e18..64521f46 100644 --- a/examples/mistral/chat/structured_outputs.py +++ b/examples/mistral/chat/structured_outputs.py @@ -3,7 +3,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff 
--git a/examples/mistral/chat/structured_outputs_with_json_schema.py b/examples/mistral/chat/structured_outputs_with_json_schema.py index 69ac9690..2f99f747 100644 --- a/examples/mistral/chat/structured_outputs_with_json_schema.py +++ b/examples/mistral/chat/structured_outputs_with_json_schema.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/chat/structured_outputs_with_pydantic.py b/examples/mistral/chat/structured_outputs_with_pydantic.py index 299f7509..ded9d52d 100644 --- a/examples/mistral/chat/structured_outputs_with_pydantic.py +++ b/examples/mistral/chat/structured_outputs_with_pydantic.py @@ -3,7 +3,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import Mistral from typing import List diff --git a/examples/mistral/classifier/async_classifier.py b/examples/mistral/classifier/async_classifier.py index 10c8bb76..d5ee6cc1 100644 --- a/examples/mistral/classifier/async_classifier.py +++ b/examples/mistral/classifier/async_classifier.py @@ -2,7 +2,7 @@ from pprint import pprint import asyncio -from mistralai import Mistral, TrainingFile, ClassifierTrainingParametersIn +from mistralai.client import Mistral, TrainingFile, ClassifierTrainingParametersIn import os diff --git a/examples/mistral/embeddings/async_embeddings.py b/examples/mistral/embeddings/async_embeddings.py index 781e87af..413769f3 100755 --- a/examples/mistral/embeddings/async_embeddings.py +++ b/examples/mistral/embeddings/async_embeddings.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/embeddings/embeddings.py b/examples/mistral/embeddings/embeddings.py index 046c87d4..64301ca0 100755 --- a/examples/mistral/embeddings/embeddings.py +++ b/examples/mistral/embeddings/embeddings.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/fim/async_code_completion.py b/examples/mistral/fim/async_code_completion.py index a6bc5717..cb6db241 100644 --- a/examples/mistral/fim/async_code_completion.py +++ b/examples/mistral/fim/async_code_completion.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/fim/code_completion.py b/examples/mistral/fim/code_completion.py index f3d70a68..4f25c59c 100644 --- a/examples/mistral/fim/code_completion.py +++ b/examples/mistral/fim/code_completion.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py index e728b8fa..8a1d8774 100644 --- a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -1,4 +1,4 @@ -from mistralai import Mistral, BatchRequest, UserMessage +from mistralai.client import Mistral, BatchRequest, UserMessage import os import asyncio diff --git a/examples/mistral/jobs/async_files.py b/examples/mistral/jobs/async_files.py index 4dc21542..4bec5237 100644 --- a/examples/mistral/jobs/async_files.py +++ b/examples/mistral/jobs/async_files.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import File +from 
mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/jobs/async_jobs.py b/examples/mistral/jobs/async_jobs.py index 44a58af1..12f9035e 100644 --- a/examples/mistral/jobs/async_jobs.py +++ b/examples/mistral/jobs/async_jobs.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import File, CompletionTrainingParametersIn +from mistralai.client import Mistral +from mistralai.client.models import File, CompletionTrainingParametersIn async def main(): diff --git a/examples/mistral/jobs/async_jobs_chat.py b/examples/mistral/jobs/async_jobs_chat.py index 80e598c7..f14fb833 100644 --- a/examples/mistral/jobs/async_jobs_chat.py +++ b/examples/mistral/jobs/async_jobs_chat.py @@ -5,8 +5,8 @@ import random from pathlib import Path -from mistralai import Mistral -from mistralai.models import ( +from mistralai.client import Mistral +from mistralai.client.models import ( File, CompletionTrainingParametersIn, ) diff --git a/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py index e62bca17..f209507d 100644 --- a/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py +++ b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py @@ -7,9 +7,9 @@ import httpx from pydantic import BaseModel, Field -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra import response_format_from_pydantic_model -from mistralai.models import File +from mistralai.client.models import File SAMPLE_PDF_URL = "https://arxiv.org/pdf/2401.04088" diff --git a/examples/mistral/jobs/dry_run_job.py b/examples/mistral/jobs/dry_run_job.py index 84a2d0ce..d4280836 100644 --- a/examples/mistral/jobs/dry_run_job.py +++ b/examples/mistral/jobs/dry_run_job.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import CompletionTrainingParametersIn +from mistralai.client import Mistral +from mistralai.client.models import CompletionTrainingParametersIn async def main(): diff --git a/examples/mistral/jobs/files.py b/examples/mistral/jobs/files.py index 5dce880b..50f6472c 100644 --- a/examples/mistral/jobs/files.py +++ b/examples/mistral/jobs/files.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File def main(): diff --git a/examples/mistral/jobs/jobs.py b/examples/mistral/jobs/jobs.py index f65fda8e..be3a821f 100644 --- a/examples/mistral/jobs/jobs.py +++ b/examples/mistral/jobs/jobs.py @@ -1,8 +1,8 @@ #!/usr/bin/env python import os -from mistralai import Mistral -from mistralai.models import File, CompletionTrainingParametersIn +from mistralai.client import Mistral +from mistralai.client.models import File, CompletionTrainingParametersIn def main(): diff --git a/examples/mistral/libraries/async_libraries.py b/examples/mistral/libraries/async_libraries.py index b2f9d4c4..fc5e6541 100644 --- a/examples/mistral/libraries/async_libraries.py +++ b/examples/mistral/libraries/async_libraries.py @@ -3,8 +3,8 @@ import os import asyncio -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/libraries/libraries.py b/examples/mistral/libraries/libraries.py index 88436540..8e4b2998 100644 --- 
a/examples/mistral/libraries/libraries.py +++ b/examples/mistral/libraries/libraries.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File def main(): diff --git a/examples/mistral/models/async_list_models.py b/examples/mistral/models/async_list_models.py index 4243d862..8b1ac503 100755 --- a/examples/mistral/models/async_list_models.py +++ b/examples/mistral/models/async_list_models.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/models/list_models.py b/examples/mistral/models/list_models.py index c6c0c855..9b68f806 100755 --- a/examples/mistral/models/list_models.py +++ b/examples/mistral/models/list_models.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/ocr/ocr_process_from_file.py b/examples/mistral/ocr/ocr_process_from_file.py index 84a7b4d8..9368ceeb 100644 --- a/examples/mistral/ocr/ocr_process_from_file.py +++ b/examples/mistral/ocr/ocr_process_from_file.py @@ -1,4 +1,4 @@ -from mistralai import Mistral +from mistralai.client import Mistral import os import json from pathlib import Path diff --git a/examples/mistral/ocr/ocr_process_from_url.py b/examples/mistral/ocr/ocr_process_from_url.py index 55f31282..4f3b0224 100644 --- a/examples/mistral/ocr/ocr_process_from_url.py +++ b/examples/mistral/ocr/ocr_process_from_url.py @@ -1,7 +1,7 @@ import json import os -from mistralai import Mistral +from mistralai.client import Mistral MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825.pdf" From a7f5e1c0446baca8cc13084e2c364fa5a692b661 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:35:56 +0100 Subject: [PATCH 12/42] ci: update lint script paths and add namespace guard - Update hooks path from _hooks/ to client/_hooks/ - Add check that src/mistralai/__init__.py must not exist (PEP 420) --- scripts/lint_custom_code.sh | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index 7c084463..5bf9d675 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -2,13 +2,21 @@ ERRORS=0 +echo "Checking PEP 420 namespace integrity..." +if [ -f src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK" +fi + echo "Running mypy..." # TODO: Uncomment once the examples are fixed # uv run mypy examples/ || ERRORS=1 echo "-> running on extra" uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -uv run mypy src/mistralai/_hooks/ \ +uv run mypy src/mistralai/client/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 echo "-> running on scripts" uv run mypy scripts/ || ERRORS=1 @@ -19,7 +27,7 @@ echo "Running pyright..." 
echo "-> running on extra" uv run pyright src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -uv run pyright src/mistralai/_hooks/ || ERRORS=1 +uv run pyright src/mistralai/client/_hooks/ || ERRORS=1 echo "-> running on scripts" uv run pyright scripts/ || ERRORS=1 @@ -29,7 +37,7 @@ uv run ruff check examples/ || ERRORS=1 echo "-> running on extra" uv run ruff check src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -uv run ruff check src/mistralai/_hooks/ \ +uv run ruff check src/mistralai/client/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 echo "-> running on scripts" uv run ruff check scripts/ || ERRORS=1 From 9d290ad34abc9e99f9d6ca5ef3296081809f98b6 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:39:36 +0100 Subject: [PATCH 13/42] docs: add MIGRATION.md for v1 to v2 upgrade --- MIGRATION.md | 247 ++++----------------------------------------------- 1 file changed, 18 insertions(+), 229 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index 7ccdf9c0..3333f6ba 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,242 +1,31 @@ +# Migration Guide: v1 to v2 -# Migration Guide for MistralAI Client from 0.\*.\* to 1.0.0 +## Import Changes -We have made significant changes to the `mistralai` library to improve its usability and consistency. This guide will help you migrate your code from the old client to the new one. +### Main Client -## Major Changes - -1. **Unified Client Class**: - - The `MistralClient` and `MistralAsyncClient` classes have been consolidated into a single `Mistral` class. - - This simplifies the API by providing a single entry point for both synchronous and asynchronous operations. - -2. **Method Names and Structure**: - - The method names and structure have been updated for better clarity and consistency. 
- - For example: - - `client.chat` is now `client.chat.complete` for non-streaming calls - - `client.chat_stream` is now `client.chat.stream` for streaming calls - - Async `client.chat` is now `client.chat.complete_async` for async non-streaming calls - - Async `client.chat_stream` is now `client.chat.stream_async` for async streaming calls - - -## Method changes - -### Sync - -| Old Methods | New Methods | -| -------------------------- | -------------------------------- | -| `MistralCLient` | `Mistral` | -| `client.chat` | `client.chat.complete` | -| `client.chat_stream` | `client.chat.stream` | -| `client.completions` | `client.fim.complete` | -| `client.completions_stream`| `client.fim.stream` | -| `client.embeddings` | `client.embeddings.create` | -| `client.list_models` | `client.models.list` | -| `client.delete_model` | `client.models.delete` | -| `client.files.create` | `client.files.upload` | -| `client.files.list` | `client.files.list` | -| `client.files.retrieve` | `client.files.retrieve` | -| `client.files.delete` | `client.files.delete` | -| `client.jobs.create` | `client.fine_tuning.jobs.create` | -| `client.jobs.list` | `client.fine_tuning.jobs.list` | -| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | -| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | - -### Async - -| Old Methods | New Methods | -| -------------------------------- | -------------------------------------- | -| `MistralAsyncClient` | `Mistral` | -| `async_client.chat` | `client.chat.complete_async` | -| `async_client.chat_stream` | `client.chat.stream_async` | -| `async_client.completions` | `client.fim.complete_async` | -| `async_client.completions_stream`| `client.fim.stream_async` | -| `async_client.embeddings` | `client.embeddings.create_async` | -| `async_client.list_models` | `client.models.list_async` | -| `async_client.delete_model` | `client.models.delete_async` | -| `async_client.files.create` | `client.files.upload_async` | -| `async_client.files.list` | `client.files.list_async` | -| `async_client.files.retrieve` | `client.files.retrieve_async` | -| `async_client.files.delete` | `client.files.delete_async` | -| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | -| `async_client.jobs.list` | `client.fine_tuning.jobs.list_async` | -| `async_client.jobs.retrieve` | `client.fine_tuning.jobs.get_async` | -| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | - -### Message Changes - -The `ChatMessage` class has been replaced with a more flexible system. You can now use the `SystemMessage`, `UserMessage`, `AssistantMessage`, and `ToolMessage` classes to create messages. - -The return object of the stream call methods have been modified to `chunk.data.choices[0].delta.content` from `chunk.choices[0].delta.content`. 
- -## Example Migrations - -### Example 1: Non-Streaming Chat - -**Old:** ```python -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" +# v1 +from mistralai import Mistral -client = MistralClient(api_key=api_key) - -messages = [ - ChatMessage(role="user", content="What is the best French cheese?") -] - -# No streaming -chat_response = client.chat( - model=model, - messages=messages, -) - -print(chat_response.choices[0].message.content) +# v2 +from mistralai.client import Mistral ``` -**New:** +### Models and Types ```python -import os - -from mistralai import Mistral, UserMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = Mistral(api_key=api_key) - -messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, -] -# Or using the new message classes -# messages = [ -# UserMessage(content="What is the best French cheese?"), -# ] - -chat_response = client.chat.complete( - model=model, - messages=messages, -) +# v1 +from mistralai.models import UserMessage -print(chat_response.choices[0].message.content) +# v2 +from mistralai.client.models import UserMessage ``` -### Example 2: Streaming Chat - -**Old:** - -```python -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = MistralClient(api_key=api_key) - -messages = [ - ChatMessage(role="user", content="What is the best French cheese?") -] - -# With streaming -stream_response = client.chat_stream(model=model, messages=messages) +## Quick Reference -for chunk in stream_response: - print(chunk.choices[0].delta.content) -``` -**New:** -```python -import os - -from mistralai import Mistral, UserMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = Mistral(api_key=api_key) - -messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, -] -# Or using the new message classes -# messages = [ -# UserMessage(content="What is the best French cheese?"), -# ] - -stream_response = client.chat.stream( - model=model, - messages=messages, -) - -for chunk in stream_response: - print(chunk.data.choices[0].delta.content) - -``` - -### Example 3: Async - -**Old:** -```python -from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = MistralAsyncClient(api_key=api_key) - -messages = [ - ChatMessage(role="user", content="What is the best French cheese?") -] - -# With async -async_response = client.chat_stream(model=model, messages=messages) - -async for chunk in async_response: - print(chunk.choices[0].delta.content) -``` - -**New:** -```python -import asyncio -import os - -from mistralai import Mistral, UserMessage - - -async def main(): - client = Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - - messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, - ] - # Or using the new message classes - # messages = [ - # UserMessage( - # content="What is the best French cheese?", - # ), - # ] - async_response = await client.chat.stream_async( - messages=messages, - model="mistral-large-latest", - ) - - async for chunk in async_response: - print(chunk.data.choices[0].delta.content) - - 
-asyncio.run(main()) -``` +| v1 | v2 | +|----|-----| +| `from mistralai import` | `from mistralai.client import` | +| `from mistralai.models` | `from mistralai.client.models` | +| `from mistralai.types` | `from mistralai.client.types` | From cf268e36ba234ead1bef936a2c772d1e02d4889e Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:39:55 +0100 Subject: [PATCH 14/42] docs: update README for v2 migration reference --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e71b1a19..129e8ee0 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # Mistral Python Client ## Migration warning - -This documentation is for Mistral AI SDK v1. You can find more details on how to migrate from v0 to v1 [here](MIGRATION.md) + +This documentation is for Mistral AI SDK v2. You can find more details on how to migrate from v1 to v2 [here](MIGRATION.md) ## API Key Setup From 52b8d3f190effc8601c7ec791d1eef77ec64108b Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 17:45:58 +0100 Subject: [PATCH 15/42] fix: restore custom SDK methods lost during regeneration Speakeasy's sdk-class-body regions were not copied when regenerating to the new mistralai.client namespace. Restored: - chat.py: parse, parse_async, parse_stream, parse_stream_async - conversations.py: run_async, run_stream_async - audio.py: realtime property Updated imports to use mistralai.client.* paths. --- src/mistralai/client/audio.py | 18 ++- src/mistralai/client/chat.py | 83 +++++++++- src/mistralai/client/conversations.py | 208 ++++++++++++++++++++++++++ 3 files changed, 307 insertions(+), 2 deletions(-) diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py index 28ccda1b..e75d6dc8 100644 --- a/src/mistralai/client/audio.py +++ b/src/mistralai/client/audio.py @@ -3,13 +3,29 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.client.transcriptions import Transcriptions -from typing import Optional +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from mistralai.extra.realtime import RealtimeTranscription class Audio(BaseSDK): transcriptions: Transcriptions r"""API for audio transcription.""" + # region sdk-class-body + @property + def realtime(self) -> "RealtimeTranscription": + """Returns a client for real-time audio transcription via WebSocket.""" + if not hasattr(self, "_realtime"): + from mistralai.extra.realtime import RealtimeTranscription # pylint: disable=import-outside-toplevel + + self._realtime = RealtimeTranscription(self.sdk_configuration) # pylint: disable=attribute-defined-outside-init + + return self._realtime + + # endregion sdk-class-body + def __init__( self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None ) -> None: diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 9c50bce8..056c652e 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -14,12 +14,93 @@ from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Type, Union + +from mistralai.extra.struct_chat import ( + ParsedChatCompletionResponse, + convert_to_parsed_chat_completion_response, +) +from mistralai.extra.utils.response_format import ( + CustomPydanticModel, + 
response_format_from_pydantic_model,
+)
 
 
 class Chat(BaseSDK):
     r"""Chat Completion API."""
 
+    # region sdk-class-body
+    # Custom .parse methods for the Structured Outputs feature.
+
+    def parse(
+        self, response_format: Type[CustomPydanticModel], **kwargs: Any
+    ) -> ParsedChatCompletionResponse[CustomPydanticModel]:
+        """
+        Parse the response using the provided response format.
+        :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into
+        :param Any **kwargs: Additional keyword arguments to pass to the .complete method
+        :return: The parsed response
+        """
+        # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete
+        json_response_format = response_format_from_pydantic_model(response_format)
+        # Run the inference
+        response = self.complete(**kwargs, response_format=json_response_format)
+        # Parse response back to the input pydantic model
+        parsed_response = convert_to_parsed_chat_completion_response(
+            response, response_format
+        )
+        return parsed_response
+
+    async def parse_async(
+        self, response_format: Type[CustomPydanticModel], **kwargs
+    ) -> ParsedChatCompletionResponse[CustomPydanticModel]:
+        """
+        Asynchronously parse the response using the provided response format.
+        :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into
+        :param Any **kwargs: Additional keyword arguments to pass to the .complete method
+        :return: The parsed response
+        """
+        json_response_format = response_format_from_pydantic_model(response_format)
+        response = await self.complete_async(  # pylint: disable=E1125
+            **kwargs, response_format=json_response_format
+        )
+        parsed_response = convert_to_parsed_chat_completion_response(
+            response, response_format
+        )
+        return parsed_response
+
+    def parse_stream(
+        self, response_format: Type[CustomPydanticModel], **kwargs
+    ) -> eventstreaming.EventStream[models.CompletionEvent]:
+        """
+        Parse the response using the provided response format.
+        For now the response will be in JSON format not in the input Pydantic model.
+        :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into
+        :param Any **kwargs: Additional keyword arguments to pass to the .stream method
+        :return: The JSON parsed response
+        """
+        json_response_format = response_format_from_pydantic_model(response_format)
+        response = self.stream(**kwargs, response_format=json_response_format)
+        return response
+
+    async def parse_stream_async(
+        self, response_format: Type[CustomPydanticModel], **kwargs
+    ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]:
+        """
+        Asynchronously parse the response using the provided response format.
+        For now the response will be in JSON format not in the input Pydantic model.
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.stream_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + return response + + # endregion sdk-class-body + def complete( self, *, diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index 9caf4221..12390b14 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -18,10 +18,218 @@ from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Dict, List, Mapping, Optional, Union +# region imports +import typing +from typing import AsyncGenerator +import logging +from collections import defaultdict + +from mistralai.client.models import ( + ResponseStartedEvent, + ConversationEventsData, + InputEntries, +) +from mistralai.extra.run.result import ( + RunResult, + RunResultEvents, + FunctionResultEvent, + reconstitue_entries, +) +from mistralai.extra.run.utils import run_requirements +from mistralai.extra.observability.otel import GenAISpanEnum, get_or_create_otel_tracer + +logger = logging.getLogger(__name__) +tracing_enabled, tracer = get_or_create_otel_tracer() + +if typing.TYPE_CHECKING: + from mistralai.extra.run.context import RunContext + +# endregion imports + class Conversations(BaseSDK): r"""(beta) Conversations API""" + # region sdk-class-body + # Custom run code allowing client side execution of code + + @run_requirements + async def run_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RunResult: + """Run a conversation with the given inputs and context. 
+ + The execution of a run will only stop when no required local execution can be done.""" + from mistralai.client.beta import Beta + from mistralai.extra.run.context import _validate_run + from mistralai.extra.run.tools import get_function_calls + + with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + with tracer.start_as_current_span(GenAISpanEnum.CONVERSATION.value): + while True: + if run_ctx.conversation_id is None: + res = await self.start_async( + inputs=input_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + run_result.conversation_id = res.conversation_id + run_ctx.conversation_id = res.conversation_id + logger.info( + f"Started Run with conversation with id {res.conversation_id}" + ) + else: + res = await self.append_async( + conversation_id=run_ctx.conversation_id, + inputs=input_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + run_ctx.request_count += 1 + run_result.output_entries.extend(res.outputs) + fcalls = get_function_calls(res.outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) + return run_result + + @run_requirements + async def run_stream_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + """Similar to `run_async` but returns a generator which streams events. 
+ + The last streamed object is the RunResult object which summarises what happened in the run.""" + from mistralai.client.beta import Beta + from mistralai.extra.run.context import _validate_run + from mistralai.extra.run.tools import get_function_calls + + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + async def run_generator() -> ( + AsyncGenerator[Union[RunResultEvents, RunResult], None] + ): + current_entries = input_entries + while True: + received_event_tracker: defaultdict[ + int, list[ConversationEventsData] + ] = defaultdict(list) + if run_ctx.conversation_id is None: + res = await self.start_stream_async( + inputs=current_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + else: + res = await self.append_stream_async( + conversation_id=run_ctx.conversation_id, + inputs=current_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + async for event in res: + if ( + isinstance(event.data, ResponseStartedEvent) + and run_ctx.conversation_id is None + ): + run_result.conversation_id = event.data.conversation_id + run_ctx.conversation_id = event.data.conversation_id + logger.info( + f"Started Run with conversation with id {run_ctx.conversation_id}" + ) + if ( + output_index := getattr(event.data, "output_index", None) + ) is not None: + received_event_tracker[output_index].append(event.data) + yield typing.cast(RunResultEvents, event) + run_ctx.request_count += 1 + outputs = reconstitue_entries(received_event_tracker) + run_result.output_entries.extend(outputs) + fcalls = get_function_calls(outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + for fresult in fresults: + yield RunResultEvents( + event="function.result", + data=FunctionResultEvent( + type="function.result", + result=fresult.result, + tool_call_id=fresult.tool_call_id, + ), + ) + current_entries = typing.cast(list[InputEntries], fresults) + yield run_result + + return run_generator() + + # endregion sdk-class-body + def start( self, *, From bc680d7415973962e09062f1414f467789c0afb8 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 18:02:13 +0100 Subject: [PATCH 16/42] chore: update speakeasy lock files --- .speakeasy/gen.lock | 18 +++++++++--------- .speakeasy/workflow.lock | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 7aae1acb..345ea2c8 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -11,9 +11,9 @@ management: installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: edcb81a1-4bcb-439e-bfcb-f30eaac48c6a - pristine_commit_hash: b192b65dd75820612c5c672593ed322d420d2c73 - pristine_tree_hash: 869c5c810e502634a018e5792d4c2efe2686dbad + generation_id: b2306c28-6200-44c1-a856-ddd318359c15 + pristine_commit_hash: dc36861e5d8b9f4c91221be8f09dc13254755c9a + pristine_tree_hash: 640358903b623a1b0d7deabbb43f39e82676a1a1 features: python: additionalDependencies: 1.0.0 @@ -1882,8 +1882,8 @@ trackedFiles: pristine_git_object: c04abd21b5b7cb9b8ddfdb52ec67fffa7d21759a 
src/mistralai/client/audio.py: id: 7a8ed2e90d61 - last_write_checksum: sha1:9ecd271eedf02703b45e6bc4280df10ed2edbbc8 - pristine_git_object: 28ccda1b533b4cef31844bddae2289268b459a24 + last_write_checksum: sha1:941d0466d9ff5d07c30a6e41cf4434857518963a + pristine_git_object: 2834ade22ab137b7620bfd4318fba4bdd9ef087f src/mistralai/client/basesdk.py: id: 7518c67b81ea last_write_checksum: sha1:c10ba4619b8c70ff876304a93b432d4466cb7112 @@ -1898,16 +1898,16 @@ trackedFiles: pristine_git_object: b30003eae52be5e79838fe994cda8474068a43dc src/mistralai/client/chat.py: id: 7eba0f088d47 - last_write_checksum: sha1:46321214352946f2077a0f60c4c903c354a42da1 - pristine_git_object: 9c50bce81c264c70256b2ff8716e88216a78535f + last_write_checksum: sha1:53558e4f3e5ecc8d2cea51d2462aa3432d8c156e + pristine_git_object: 6fa210bb01b193e1bd034431923a3d4dc8c8a16c src/mistralai/client/classifiers.py: id: 26e773725732 last_write_checksum: sha1:b3bed5a404f8837cc12e516f3fb85f47fd37518a pristine_git_object: 537e2438afcb570a3e436ab4dd8b7d604b35b627 src/mistralai/client/conversations.py: id: 40692a878064 - last_write_checksum: sha1:fc75dc4099891c8cbfbcc72284bf8e7dbbb834a5 - pristine_git_object: 9caf42214daf262b15bac5b36467700ee17cd7d1 + last_write_checksum: sha1:fedcc53385d833f18fdd393591cb156bc5e5f3d1 + pristine_git_object: 285beddbd175fee210b697d4714c28196c1fa7a2 src/mistralai/client/documents.py: id: bcc17286c31c last_write_checksum: sha1:82287ef513f2f5ee1acb9ffe8323f2dad0fc86f4 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 38b7899c..a0e535c2 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -39,7 +39,7 @@ targets: sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:deaa27e908bb7bee4f2ad753a92beb5749805f3f160eb56c5988b336d31a531c + codeSamplesRevisionDigest: sha256:debd698577e8da014e900a57194128d867ad76fd0d2e2b361e9d0c298700fc67 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 From d4b4b2920b7b1a6566e413f44ebbdf3adbfa875d Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 18:02:17 +0100 Subject: [PATCH 17/42] fix: add region markers for speakeasy custom code preservation - chat.py: wrap custom imports in # region imports block - audio.py: wrap TYPE_CHECKING import in # region imports block - conversations.py: add pylint disable comments, fix else-after-break These markers ensure speakeasy regeneration preserves custom code. 
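For reference, the preserved layout looks like this (a minimal sketch based on
audio.py; `BaseSDK` is stubbed here so the snippet stands alone, and the real
generated class carries more members):

```python
# region imports
# Anything between these markers is kept verbatim across regenerations.
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from mistralai.extra.realtime import RealtimeTranscription
# endregion imports


class BaseSDK:  # stub for illustration only; the real base class is generated
    pass


class Audio(BaseSDK):
    # region sdk-class-body
    # Custom members live between these markers; generated methods sit outside.
    @property
    def realtime(self) -> "RealtimeTranscription":
        if not hasattr(self, "_realtime"):
            from mistralai.extra.realtime import RealtimeTranscription  # pylint: disable=import-outside-toplevel

            self._realtime = RealtimeTranscription(self.sdk_configuration)  # pylint: disable=attribute-defined-outside-init
        return self._realtime
    # endregion sdk-class-body
```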
--- src/mistralai/client/audio.py | 30 +++++++++-------- src/mistralai/client/chat.py | 6 +++- src/mistralai/client/conversations.py | 48 +++++++++++++-------------- 3 files changed, 45 insertions(+), 39 deletions(-) diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py index e75d6dc8..2834ade2 100644 --- a/src/mistralai/client/audio.py +++ b/src/mistralai/client/audio.py @@ -3,16 +3,32 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.client.transcriptions import Transcriptions -from typing import Optional, TYPE_CHECKING +from typing import Optional + +# region imports +from typing import TYPE_CHECKING if TYPE_CHECKING: from mistralai.extra.realtime import RealtimeTranscription +# endregion imports class Audio(BaseSDK): transcriptions: Transcriptions r"""API for audio transcription.""" + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.transcriptions = Transcriptions( + self.sdk_configuration, parent_ref=self.parent_ref + ) + # region sdk-class-body @property def realtime(self) -> "RealtimeTranscription": @@ -25,15 +41,3 @@ def realtime(self) -> "RealtimeTranscription": return self._realtime # endregion sdk-class-body - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.transcriptions = Transcriptions( - self.sdk_configuration, parent_ref=self.parent_ref - ) diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 056c652e..6fa210bb 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -14,7 +14,10 @@ from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Type, Union +from typing import Any, Dict, List, Mapping, Optional, Union + +# region imports +from typing import Type from mistralai.extra.struct_chat import ( ParsedChatCompletionResponse, @@ -24,6 +27,7 @@ CustomPydanticModel, response_format_from_pydantic_model, ) +# endregion imports class Chat(BaseSDK): diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index 12390b14..285beddb 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -75,9 +75,9 @@ async def run_async( """Run a conversation with the given inputs and context. 
The execution of a run will only stop when no required local execution can be done.""" - from mistralai.client.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls + from mistralai.client.beta import Beta # pylint: disable=import-outside-toplevel + from mistralai.extra.run.context import _validate_run # pylint: disable=import-outside-toplevel + from mistralai.extra.run.tools import get_function_calls # pylint: disable=import-outside-toplevel with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): req, run_result, input_entries = await _validate_run( @@ -104,7 +104,7 @@ async def run_async( ) run_result.conversation_id = res.conversation_id run_ctx.conversation_id = res.conversation_id - logger.info( + logger.info( # pylint: disable=logging-fstring-interpolation f"Started Run with conversation with id {res.conversation_id}" ) else: @@ -121,10 +121,9 @@ async def run_async( if not fcalls: logger.debug("No more function calls to execute") break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - input_entries = typing.cast(list[InputEntries], fresults) + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) return run_result @run_requirements @@ -149,9 +148,9 @@ async def run_stream_async( """Similar to `run_async` but returns a generator which streams events. The last streamed object is the RunResult object which summarises what happened in the run.""" - from mistralai.client.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls + from mistralai.client.beta import Beta # pylint: disable=import-outside-toplevel + from mistralai.extra.run.context import _validate_run # pylint: disable=import-outside-toplevel + from mistralai.extra.run.tools import get_function_calls # pylint: disable=import-outside-toplevel req, run_result, input_entries = await _validate_run( beta_client=Beta(self.sdk_configuration), @@ -196,7 +195,7 @@ async def run_generator() -> ( ): run_result.conversation_id = event.data.conversation_id run_ctx.conversation_id = event.data.conversation_id - logger.info( + logger.info( # pylint: disable=logging-fstring-interpolation f"Started Run with conversation with id {run_ctx.conversation_id}" ) if ( @@ -211,19 +210,18 @@ async def run_generator() -> ( if not fcalls: logger.debug("No more function calls to execute") break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - for fresult in fresults: - yield RunResultEvents( - event="function.result", - data=FunctionResultEvent( - type="function.result", - result=fresult.result, - tool_call_id=fresult.tool_call_id, - ), - ) - current_entries = typing.cast(list[InputEntries], fresults) + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + for fresult in fresults: + yield RunResultEvents( + event="function.result", + data=FunctionResultEvent( + type="function.result", + result=fresult.result, + tool_call_id=fresult.tool_call_id, + ), + ) + current_entries = typing.cast(list[InputEntries], fresults) yield run_result return run_generator() From a59414159754b0048d0f0c9193ce88ccf0548adf Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 18:51:35 +0100 Subject: [PATCH 18/42] ci: update 
publish workflow for v1/v2 dual-branch support - Auto-publish from v1 branch on RELEASES.md changes - Require manual confirmation ("publish") for main branch deployments - Prevents accidental v2.0.0 release before it's ready This allows merging the v2 namespace migration to main safely while maintaining v1.x releases from the v1 branch. --- .github/workflows/sdk_publish_mistralai_sdk.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index 0a225d70..f12ea5c4 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -6,14 +6,23 @@ permissions: statuses: write "on": workflow_dispatch: + inputs: + confirm_publish: + description: 'Type "publish" to confirm deployment from main branch' + required: false + type: string push: branches: - - main + - v1 paths: - RELEASES.md - "*/RELEASES.md" jobs: publish: + # Auto-publish from v1 branch; require manual confirmation from main + if: | + github.ref == 'refs/heads/v1' || + (github.event_name == 'workflow_dispatch' && github.event.inputs.confirm_publish == 'publish') uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} From 48e7d75227df7d0cd7066b1570a26160ba00d4f8 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Tue, 10 Feb 2026 11:00:57 +0100 Subject: [PATCH 19/42] =?UTF-8?q?docs:=20expand=20MIGRATION.md=20with=20v0?= =?UTF-8?q?=E2=86=92v1=20and=20v1=E2=86=92v2=20guides?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add v1→v2 section explaining PEP 420 namespace change - Explain motivation (azure/gcp companion packages) - Include automated migration sed commands - Preserve v0→v1 method mapping tables and examples --- MIGRATION.md | 164 ++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 150 insertions(+), 14 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index 3333f6ba..4ab7f2ff 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,31 +1,167 @@ -# Migration Guide: v1 to v2 +# Migration Guide -## Import Changes +This guide covers migrating between major versions of the Mistral Python SDK. -### Main Client +--- + +## Migrating from v1.x to v2.x + +Version 2.0 updates the import paths from `mistralai` to `mistralai.client`. + +### Import Changes + +All imports move from `mistralai` to `mistralai.client`: ```python # v1 from mistralai import Mistral +from mistralai.models import UserMessage, AssistantMessage +from mistralai.types import BaseModel # v2 from mistralai.client import Mistral +from mistralai.client.models import UserMessage, AssistantMessage +from mistralai.client.types import BaseModel ``` -### Models and Types +### Quick Reference + +| v1 | v2 | +|---|---| +| `from mistralai import Mistral` | `from mistralai.client import Mistral` | +| `from mistralai.models import ...` | `from mistralai.client.models import ...` | +| `from mistralai.types import ...` | `from mistralai.client.types import ...` | +| `from mistralai.utils import ...` | `from mistralai.client.utils import ...` | + +### What Stays the Same + +- All method names and signatures remain identical +- The `Mistral` client API is unchanged +- All models (`UserMessage`, `AssistantMessage`, etc.) 
work the same way + +--- + +## Migrating from v0.x to v1.x + +Version 1.0 introduced significant changes to improve usability and consistency. + +### Major Changes + +1. **Unified Client Class**: `MistralClient` and `MistralAsyncClient` consolidated into a single `Mistral` class +2. **Method Structure**: Methods reorganized into resource-based groups (e.g., `client.chat.complete()`) +3. **Message Classes**: `ChatMessage` replaced with typed classes (`UserMessage`, `AssistantMessage`, etc.) +4. **Streaming Response**: Stream chunks now accessed via `chunk.data.choices[0].delta.content` +### Method Mapping + +#### Sync Methods + +| v0.x | v1.x | +|---|---| +| `MistralClient` | `Mistral` | +| `client.chat` | `client.chat.complete` | +| `client.chat_stream` | `client.chat.stream` | +| `client.completions` | `client.fim.complete` | +| `client.completions_stream` | `client.fim.stream` | +| `client.embeddings` | `client.embeddings.create` | +| `client.list_models` | `client.models.list` | +| `client.delete_model` | `client.models.delete` | +| `client.files.create` | `client.files.upload` | +| `client.jobs.create` | `client.fine_tuning.jobs.create` | +| `client.jobs.list` | `client.fine_tuning.jobs.list` | +| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | +| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | + +#### Async Methods + +| v0.x | v1.x | +|---|---| +| `MistralAsyncClient` | `Mistral` | +| `async_client.chat` | `client.chat.complete_async` | +| `async_client.chat_stream` | `client.chat.stream_async` | +| `async_client.completions` | `client.fim.complete_async` | +| `async_client.completions_stream` | `client.fim.stream_async` | +| `async_client.embeddings` | `client.embeddings.create_async` | +| `async_client.list_models` | `client.models.list_async` | +| `async_client.files.create` | `client.files.upload_async` | +| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | +| `async_client.jobs.list` | `client.fine_tuning.jobs.list_async` | +| `async_client.jobs.retrieve` | `client.fine_tuning.jobs.get_async` | +| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | + +### Example: Non-Streaming Chat + +**v0.x:** ```python -# v1 -from mistralai.models import UserMessage +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage -# v2 -from mistralai.client.models import UserMessage +client = MistralClient(api_key=api_key) + +messages = [ChatMessage(role="user", content="What is the best French cheese?")] +response = client.chat(model="mistral-large-latest", messages=messages) + +print(response.choices[0].message.content) ``` -## Quick Reference +**v1.x:** +```python +from mistralai import Mistral, UserMessage -| v1 | v2 | -|----|-----| -| `from mistralai import` | `from mistralai.client import` | -| `from mistralai.models` | `from mistralai.client.models` | -| `from mistralai.types` | `from mistralai.client.types` | +client = Mistral(api_key=api_key) + +messages = [UserMessage(content="What is the best French cheese?")] +response = client.chat.complete(model="mistral-large-latest", messages=messages) + +print(response.choices[0].message.content) +``` + +### Example: Streaming Chat + +**v0.x:** +```python +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + +client = MistralClient(api_key=api_key) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] + +for chunk in client.chat_stream(model="mistral-large-latest", 
messages=messages): + print(chunk.choices[0].delta.content) +``` + +**v1.x:** +```python +from mistralai import Mistral, UserMessage + +client = Mistral(api_key=api_key) +messages = [UserMessage(content="What is the best French cheese?")] + +for chunk in client.chat.stream(model="mistral-large-latest", messages=messages): + print(chunk.data.choices[0].delta.content) # Note: chunk.data +``` + +### Example: Async Streaming + +**v0.x:** +```python +from mistralai.async_client import MistralAsyncClient +from mistralai.models.chat_completion import ChatMessage + +client = MistralAsyncClient(api_key=api_key) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] + +async for chunk in client.chat_stream(model="mistral-large-latest", messages=messages): + print(chunk.choices[0].delta.content) +``` + +**v1.x:** +```python +from mistralai import Mistral, UserMessage + +client = Mistral(api_key=api_key) +messages = [UserMessage(content="What is the best French cheese?")] + +async for chunk in await client.chat.stream_async(model="mistral-large-latest", messages=messages): + print(chunk.data.choices[0].delta.content) +``` From 955b83acbc4ea9b464322990a1e82500a7afc40b Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Tue, 10 Feb 2026 14:20:46 +0100 Subject: [PATCH 20/42] ci: add warning to publish workflow about v2 alpha status --- .github/workflows/sdk_publish_mistralai_sdk.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index f12ea5c4..44635571 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -8,7 +8,7 @@ permissions: workflow_dispatch: inputs: confirm_publish: - description: 'Type "publish" to confirm deployment from main branch' + description: 'WARNING: This will publish v2 SDK (mistralai.client namespace) which is still WIP/alpha. To publish v1 (mistralai namespace), use the v1 branch instead. Type "publish" to confirm.' 
required: false
         type: string
   push:

From d4325cbdbf80d9d28d43ded3206b085f6c19eb4f Mon Sep 17 00:00:00 2001
From: Louis Sanna
Date: Tue, 10 Feb 2026 14:39:52 +0100
Subject: [PATCH 21/42] fix: handle null outputs and add timeout in batch job
 example

- Add null check for job.outputs to prevent crash when API returns no outputs
- Add CANCELLED to terminal states
- Add 1 minute timeout to prevent infinite polling

---
 .../async_batch_job_chat_completion_inline.py | 15 ++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py
index 8a1d8774..8b4cedd3 100644
--- a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py
+++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py
@@ -26,14 +26,23 @@ async def main():
 
     print(f"Created job with ID: {job.id}")
 
-    while job.status not in ["SUCCESS", "FAILED"]:
+    max_wait = 60  # 1 minute timeout for CI
+    elapsed = 0
+    while job.status not in ["SUCCESS", "FAILED", "CANCELLED"]:
         await asyncio.sleep(1)
+        elapsed += 1
+        if elapsed >= max_wait:
+            print(f"Timeout after {max_wait}s, job still {job.status}")
+            return
         job = await client.batch.jobs.get_async(job_id=job.id)
         print(f"Job status: {job.status}")
 
     print(f"Job is done, status {job.status}")
 
-    for res in job.outputs:
-        print(res["response"]["body"])
+    if job.outputs:
+        for res in job.outputs:
+            print(res["response"]["body"])
+    else:
+        print(f"No outputs (succeeded: {job.succeeded_requests}, failed: {job.failed_requests})")
 
 
 if __name__ == "__main__":
     asyncio.run(main())

From 1f932e842bf3f93a7b80bc11bb91ea878b2aeaee Mon Sep 17 00:00:00 2001
From: Louis Sanna
Date: Tue, 10 Feb 2026 15:06:10 +0100
Subject: [PATCH 22/42] fix: simplify async_conversation_run example to reduce
 CI flakiness

The original example used code_interpreter with differential equations,
which caused timeouts and flaky CI failures. Simplified to "2+2" math.

Original complex example preserved as
async_conversation_run_code_interpreter.py and added to CI skip list
(too slow/flaky for CI).
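Where a heavy example does have to run in CI, a hard per-example deadline is
an alternative to the skip list; a minimal sketch using only the standard
library (the 120-second budget is an arbitrary placeholder):

```python
import asyncio


async def main() -> None:
    ...  # the example body would go here


if __name__ == "__main__":
    # wait_for cancels the task once the deadline passes and raises
    # TimeoutError, so a stuck example fails fast instead of hanging the job.
    asyncio.run(asyncio.wait_for(main(), timeout=120))
```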
--- .../mistral/agents/async_conversation_run.py | 39 ++----------- ...async_conversation_run_code_interpreter.py | 57 +++++++++++++++++++ scripts/run_examples.sh | 1 + 3 files changed, 63 insertions(+), 34 deletions(-) create mode 100644 examples/mistral/agents/async_conversation_run_code_interpreter.py diff --git a/examples/mistral/agents/async_conversation_run.py b/examples/mistral/agents/async_conversation_run.py index 10c81d77..bb96ed78 100644 --- a/examples/mistral/agents/async_conversation_run.py +++ b/examples/mistral/agents/async_conversation_run.py @@ -9,48 +9,19 @@ MODEL = "mistral-medium-2505" -def math_question_generator(question_num: int): - """Random generator of mathematical question - - Args: - question_num (int): the number of the question that will be returned, should be between 1-100 - """ - return ( - "solve the following differential equation: `y'' + 3y' + 2y = 0`" - if question_num % 2 == 0 - else "solve the following differential equation: `y'' - 4y' + 4y = e^x`" - ) - - async def main(): api_key = os.environ["MISTRAL_API_KEY"] client = Mistral(api_key=api_key) - class Explanation(BaseModel): - explanation: str - output: str - - class MathDemonstration(BaseModel): - steps: list[Explanation] - final_answer: str + class MathResult(BaseModel): + answer: int - async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: - # register a new function that can be executed on the client side - run_ctx.register_func(math_question_generator) + async with RunContext(model=MODEL, output_format=MathResult) as run_ctx: run_result = await client.beta.conversations.run_async( run_ctx=run_ctx, - instructions="Use the code interpreter to help you when asked mathematical questions.", - inputs=[ - {"role": "user", "content": "hey"}, - {"role": "assistant", "content": "hello"}, - {"role": "user", "content": "Request a math question and answer it."}, - ], - tools=[{"type": "code_interpreter"}], + inputs=[{"role": "user", "content": "What is 2 + 2?"}], ) - print("All run entries:") - for entry in run_result.output_entries: - print(f"{entry}") - print(f"Final model: {run_result.output_as_model}") + print(f"Result: {run_result.output_as_model}") if __name__ == "__main__": diff --git a/examples/mistral/agents/async_conversation_run_code_interpreter.py b/examples/mistral/agents/async_conversation_run_code_interpreter.py new file mode 100644 index 00000000..10c81d77 --- /dev/null +++ b/examples/mistral/agents/async_conversation_run_code_interpreter.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext +from mistralai.client.types import BaseModel + +MODEL = "mistral-medium-2505" + + +def math_question_generator(question_num: int): + """Random generator of mathematical question + + Args: + question_num (int): the number of the question that will be returned, should be between 1-100 + """ + return ( + "solve the following differential equation: `y'' + 3y' + 2y = 0`" + if question_num % 2 == 0 + else "solve the following differential equation: `y'' - 4y' + 4y = e^x`" + ) + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: + # register a new function that can be executed on the client side + 
run_ctx.register_func(math_question_generator) + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + instructions="Use the code interpreter to help you when asked mathematical questions.", + inputs=[ + {"role": "user", "content": "hey"}, + {"role": "assistant", "content": "hello"}, + {"role": "user", "content": "Request a math question and answer it."}, + ], + tools=[{"type": "code_interpreter"}], + ) + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 5bc6fc48..40ff2c8f 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -33,6 +33,7 @@ exclude_files=( "examples/mistral/classifier/async_classifier.py" "examples/mistral/mcp_servers/sse_server.py" "examples/mistral/mcp_servers/stdio_server.py" + "examples/mistral/agents/async_conversation_run_code_interpreter.py" "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" "examples/mistral/agents/async_conversation_run_mcp_remote.py" From 6475b1caefd308c35e48d469a4bc9a314a81a10a Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Wed, 11 Feb 2026 10:10:34 +0100 Subject: [PATCH 23/42] chore: update speakeasy gen.yaml with recommended v2 configs (#345) * chore: update speakeasy gen.yaml with recommended v2 configs Update generation fixes: - Enable nameResolutionFeb2025, securityFeb2025, sharedErrorComponentsApr2025 - Add methodSignaturesApr2024 and sharedNestedComponentsJan2026 Update python section: - Enable preApplyUnionDiscriminators - Add forwardCompatibleEnumsByDefault: true - Add forwardCompatibleUnionsByDefault: tagged-only - Add flatAdditionalProperties: true Bump version to 2.0.0a2. * chore: regenerate SDK with new speakeasy configs Regenerated with speakeasy v1.685.0 using the updated gen.yaml config. 
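For reference, the relevant gen.yaml sections now look roughly like this (a
sketch assembled from the flags listed above; surrounding fields and exact
key placement may differ from the committed file):

```yaml
generation:
  fixes:
    nameResolutionFeb2025: true
    securityFeb2025: true
    sharedErrorComponentsApr2025: true
    methodSignaturesApr2024: true
    sharedNestedComponentsJan2026: true
python:
  version: 2.0.0a2
  preApplyUnionDiscriminators: true
  forwardCompatibleEnumsByDefault: true
  forwardCompatibleUnionsByDefault: tagged-only
  flatAdditionalProperties: true
```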
Key changes from the new config flags: - Forward-compatible enums (accept unknown values) - Forward-compatible tagged unions (Unknown variant) - Updated type names from name resolution fixes - Flat additional properties * fix: update extra module and docs for new speakeasy types - Update type references: Tools -> ConversationRequestTool - Remove type= parameter from FunctionTool constructor (now a constant) - Use isinstance() check instead of .type attribute access - Document type name changes in MIGRATION.md * chore: bump version to 2.0.0a2 * fix: fix example types and enable mypy in CI - Rename azure examples from .py.py to .py - Fix message types in azure and mistral examples - Add type annotations where needed for mypy - Enable mypy for examples in lint_custom_code.sh --- .speakeasy/gen.lock | 778 ++++++----------- .speakeasy/gen.yaml | 15 +- ...-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock | 799 ++++++++++++++++++ MIGRATION.md | 14 +- README.md | 50 +- docs/models/agent.md | 2 +- docs/models/agentcreationrequest.md | 20 +- docs/models/agentcreationrequesttool.md | 41 + docs/models/agenthandoffdoneevent.md | 16 +- docs/models/agenthandoffdoneeventtype.md | 8 - docs/models/agenthandoffstartedevent.md | 16 +- docs/models/agenthandoffstartedeventtype.md | 8 - ...md => agentsapiv1agentsgetagentversion.md} | 2 +- docs/models/agentsapiv1agentsgetrequest.md | 8 +- ...> agentsapiv1conversationslistresponse.md} | 2 +- docs/models/agentscompletionrequest.md | 2 +- ...s.md => agentscompletionrequestmessage.md} | 2 +- docs/models/agentscompletionstreamrequest.md | 2 +- ...> agentscompletionstreamrequestmessage.md} | 2 +- .../agentscompletionstreamrequestmessages.md | 29 - docs/models/{tools.md => agenttool.md} | 2 +- docs/models/agentupdaterequest.md | 22 +- ...tiontools.md => agentupdaterequesttool.md} | 2 +- docs/models/audiochunk.md | 8 +- docs/models/audiochunktype.md | 8 - docs/models/basemodelcard.md | 2 +- docs/models/basemodelcardtype.md | 8 - docs/models/chatcompletionchoice.md | 10 +- ...md => chatcompletionchoicefinishreason.md} | 2 +- docs/models/chatcompletionrequest.md | 4 +- ...one.md => chatcompletionrequestmessage.md} | 2 +- .../{stop.md => chatcompletionrequeststop.md} | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- ... => chatcompletionstreamrequestmessage.md} | 2 +- docs/models/chatmoderationrequest.md | 8 +- docs/models/chatmoderationrequestinputs.md | 19 - ...ges.md => chatmoderationrequestinputs1.md} | 2 +- docs/models/chatmoderationrequestinputs2.md | 29 + docs/models/chatmoderationrequestinputs3.md | 19 + docs/models/classifierdetailedjobout.md | 4 +- ...=> classifierdetailedjoboutintegration.md} | 2 +- .../models/classifierdetailedjoboutjobtype.md | 8 - docs/models/classifierftmodelout.md | 36 +- docs/models/classifierftmodeloutmodeltype.md | 8 - docs/models/classifierjobout.md | 4 +- ...ions.md => classifierjoboutintegration.md} | 2 +- docs/models/classifierjoboutjobtype.md | 10 - docs/models/codeinterpretertool.md | 6 +- docs/models/codeinterpretertooltype.md | 8 - docs/models/completiondetailedjobout.md | 6 +- ...=> completiondetailedjoboutintegration.md} | 2 +- .../models/completiondetailedjoboutjobtype.md | 8 - ... 
=> completiondetailedjoboutrepository.md} | 2 +- docs/models/completionftmodelout.md | 2 +- docs/models/completionjobout.md | 8 +- ...ions.md => completionjoboutintegration.md} | 2 +- ...ories.md => completionjoboutrepository.md} | 2 +- .../{status.md => completionjoboutstatus.md} | 2 +- docs/models/conversationhistory.md | 2 +- docs/models/conversationrequest.md | 30 +- ....md => conversationrequestagentversion.md} | 2 +- ...=> conversationrequesthandoffexecution.md} | 2 +- ...esttools.md => conversationrequesttool.md} | 2 +- docs/models/conversationresponse.md | 2 +- docs/models/conversationstreamrequest.md | 2 +- ...ls.md => conversationstreamrequesttool.md} | 2 +- docs/models/conversationstreamrequesttools.md | 41 - docs/models/deltamessage.md | 10 +- .../{content.md => deltamessagecontent.md} | 2 +- docs/models/documentlibrarytool.md | 8 +- docs/models/documentlibrarytooltype.md | 8 - ...dv1documentupload.md => documentupload.md} | 2 +- docs/models/{entries.md => entry.md} | 2 +- docs/models/ftmodelcard.md | 2 +- docs/models/ftmodelcardtype.md | 8 - docs/models/functioncallevent.md | 18 +- docs/models/functioncalleventtype.md | 8 - docs/models/functiontool.md | 8 +- docs/models/functiontooltype.md | 8 - docs/models/githubrepositoryin.md | 16 +- docs/models/githubrepositoryintype.md | 8 - docs/models/githubrepositoryout.md | 16 +- docs/models/githubrepositoryouttype.md | 8 - docs/models/imagegenerationtool.md | 6 +- docs/models/imagegenerationtooltype.md | 8 - docs/models/imageurlchunk.md | 2 +- ...geurlchunkimageurl.md => imageurlunion.md} | 2 +- docs/models/{messages.md => inputsmessage.md} | 2 +- docs/models/instructrequest.md | 6 +- docs/models/instructrequestinputs.md | 6 +- .../{two.md => instructrequestmessage.md} | 2 +- docs/models/jobin.md | 4 +- ...binintegrations.md => jobinintegration.md} | 2 +- ...obinrepositories.md => jobinrepository.md} | 2 +- ...esfinetuningcreatefinetuningjobresponse.md | 4 +- ...outesfinetuninggetfinetuningjobsrequest.md | 24 +- ...outesfinetuninggetfinetuningjobsstatus.md} | 2 +- docs/models/jobtype.md | 10 - .../librariesdocumentsuploadv1request.md | 8 +- docs/models/messageinputentry.md | 20 +- .../{object.md => messageinputentryobject.md} | 2 +- docs/models/messageoutputevent.md | 2 +- docs/models/messageoutputeventtype.md | 8 - docs/models/modelconversation.md | 2 +- ...agenttools.md => modelconversationtool.md} | 2 +- docs/models/modellist.md | 8 +- docs/models/{data.md => modellistdata.md} | 2 +- docs/models/modeltype.md | 8 - ...rtbodyparams.md => multipartbodyparams.md} | 2 +- docs/models/{outputs.md => output.md} | 2 +- .../realtimetranscriptionerrordetail.md | 8 +- ...ealtimetranscriptionerrordetailmessage.md} | 2 +- docs/models/{response1.md => response.md} | 2 +- docs/models/responsedoneevent.md | 10 +- docs/models/responsedoneeventtype.md | 8 - docs/models/responseerrorevent.md | 12 +- docs/models/responseerroreventtype.md | 8 - ...esponseretrievemodelv1modelsmodelidget.md} | 2 +- docs/models/responsestartedevent.md | 10 +- docs/models/responsestartedeventtype.md | 8 - ...nsget.md => responsev1conversationsget.md} | 2 +- docs/models/role.md | 8 - docs/models/systemmessage.md | 2 +- docs/models/toolexecutiondeltaevent.md | 16 +- docs/models/toolexecutiondeltaeventtype.md | 8 - docs/models/toolexecutiondoneevent.md | 16 +- docs/models/toolexecutiondoneeventtype.md | 8 - docs/models/toolexecutionentry.md | 2 +- .../{name.md => toolexecutionentryname.md} | 2 +- docs/models/toolexecutionstartedevent.md | 16 +- 
docs/models/toolexecutionstartedeventtype.md | 8 - docs/models/toolmessage.md | 2 +- docs/models/toolmessagerole.md | 8 - docs/models/transcriptionsegmentchunk.md | 18 +- ...pe.md => transcriptionsegmentchunktype.md} | 2 +- docs/models/transcriptionstreamdone.md | 18 +- docs/models/transcriptionstreamdonetype.md | 8 - docs/models/transcriptionstreamlanguage.md | 10 +- .../models/transcriptionstreamlanguagetype.md | 8 - .../models/transcriptionstreamsegmentdelta.md | 16 +- .../transcriptionstreamsegmentdeltatype.md | 8 - docs/models/transcriptionstreamtextdelta.md | 10 +- .../transcriptionstreamtextdeltatype.md | 8 - docs/models/usermessage.md | 2 +- docs/models/usermessagerole.md | 8 - docs/models/wandbintegration.md | 2 +- docs/models/wandbintegrationout.md | 14 +- docs/models/wandbintegrationouttype.md | 8 - docs/models/wandbintegrationtype.md | 8 - docs/models/websearchpremiumtool.md | 6 +- docs/models/websearchpremiumtooltype.md | 8 - docs/models/websearchtool.md | 6 +- docs/models/websearchtooltype.md | 8 - docs/sdks/agents/README.md | 4 +- .../sdks/{mistraljobs => batchjobs}/README.md | 0 .../{mistralagents => betaagents}/README.md | 58 +- docs/sdks/chat/README.md | 6 +- docs/sdks/classifiers/README.md | 10 +- docs/sdks/conversations/README.md | 38 +- docs/sdks/{jobs => finetuningjobs}/README.md | 30 +- docs/sdks/models/README.md | 2 +- examples/azure/az_chat_no_streaming.py | 15 + examples/azure/az_chat_no_streaming.py.py | 16 - examples/azure/chat_no_streaming.py | 15 + examples/azure/chat_no_streaming.py.py | 16 - .../mistral/chat/chatbot_with_streaming.py | 3 +- examples/mistral/chat/function_calling.py | 69 +- .../mistral/classifier/async_classifier.py | 9 +- pyproject.toml | 2 +- scripts/lint_custom_code.sh | 5 +- src/mistralai/client/_version.py | 4 +- src/mistralai/client/agents.py | 24 +- src/mistralai/client/batch.py | 6 +- .../client/{mistral_jobs.py => batch_jobs.py} | 2 +- src/mistralai/client/beta.py | 6 +- .../{mistral_agents.py => beta_agents.py} | 34 +- src/mistralai/client/chat.py | 40 +- src/mistralai/client/classifiers.py | 16 +- src/mistralai/client/conversations.py | 68 +- src/mistralai/client/documents.py | 16 +- src/mistralai/client/files.py | 16 +- src/mistralai/client/fine_tuning.py | 6 +- .../client/{jobs.py => fine_tuning_jobs.py} | 30 +- src/mistralai/client/models/__init__.py | 674 ++++++--------- src/mistralai/client/models/agent.py | 27 +- .../client/models/agentcreationrequest.py | 27 +- .../client/models/agenthandoffdoneevent.py | 18 +- .../client/models/agenthandoffstartedevent.py | 18 +- .../models/agents_api_v1_agents_getop.py | 12 +- .../agents_api_v1_conversations_getop.py | 9 +- .../agents_api_v1_conversations_listop.py | 8 +- .../client/models/agentscompletionrequest.py | 10 +- .../models/agentscompletionstreamrequest.py | 10 +- .../client/models/agentupdaterequest.py | 27 +- src/mistralai/client/models/audiochunk.py | 19 +- src/mistralai/client/models/basemodelcard.py | 8 +- src/mistralai/client/models/batchjobstatus.py | 22 +- .../client/models/builtinconnectors.py | 18 +- .../client/models/chatcompletionchoice.py | 6 +- .../client/models/chatcompletionrequest.py | 22 +- .../models/chatcompletionstreamrequest.py | 10 +- .../client/models/chatmoderationrequest.py | 30 +- .../client/models/classifierdetailedjobout.py | 56 +- .../client/models/classifierftmodelout.py | 16 +- .../client/models/classifierjobout.py | 57 +- .../client/models/codeinterpretertool.py | 20 +- .../client/models/completiondetailedjobout.py | 64 +- 
.../client/models/completionftmodelout.py | 16 +- .../client/models/completionjobout.py | 69 +- .../client/models/conversationevents.py | 25 +- .../client/models/conversationhistory.py | 12 +- .../client/models/conversationrequest.py | 45 +- .../client/models/conversationresponse.py | 12 +- .../models/conversationstreamrequest.py | 27 +- src/mistralai/client/models/deltamessage.py | 12 +- .../client/models/documentlibrarytool.py | 20 +- .../models/files_api_routes_upload_fileop.py | 4 +- .../client/models/ftclassifierlossfunction.py | 12 +- src/mistralai/client/models/ftmodelcard.py | 10 +- .../client/models/functioncallevent.py | 18 +- src/mistralai/client/models/functiontool.py | 17 +- .../client/models/githubrepositoryin.py | 17 +- .../client/models/githubrepositoryout.py | 17 +- .../client/models/imagegenerationtool.py | 20 +- src/mistralai/client/models/imageurlchunk.py | 10 +- src/mistralai/client/models/inputs.py | 10 +- .../client/models/instructrequest.py | 10 +- src/mistralai/client/models/jobin.py | 16 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 11 +- ...es_fine_tuning_create_fine_tuning_jobop.py | 19 +- ...outes_fine_tuning_get_fine_tuning_jobop.py | 11 +- ...utes_fine_tuning_get_fine_tuning_jobsop.py | 6 +- ...tes_fine_tuning_start_fine_tuning_jobop.py | 11 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 15 +- src/mistralai/client/models/jobsout.py | 9 +- .../models/libraries_documents_upload_v1op.py | 8 +- .../client/models/messageinputentry.py | 16 +- .../client/models/messageoutputevent.py | 19 +- .../client/models/modelconversation.py | 27 +- src/mistralai/client/models/modellist.py | 18 +- src/mistralai/client/models/ocrtableobject.py | 13 +- .../realtimetranscriptionerrordetail.py | 12 +- .../client/models/responsedoneevent.py | 18 +- .../client/models/responseerrorevent.py | 18 +- .../client/models/responseformats.py | 14 +- .../client/models/responsestartedevent.py | 18 +- ...retrieve_model_v1_models_model_id_getop.py | 15 +- src/mistralai/client/models/ssetypes.py | 28 +- src/mistralai/client/models/systemmessage.py | 17 +- src/mistralai/client/models/toolchoiceenum.py | 16 +- .../client/models/toolexecutiondeltaevent.py | 18 +- .../client/models/toolexecutiondoneevent.py | 18 +- .../client/models/toolexecutionentry.py | 12 +- .../models/toolexecutionstartedevent.py | 18 +- src/mistralai/client/models/toolmessage.py | 19 +- .../models/transcriptionsegmentchunk.py | 6 +- .../client/models/transcriptionstreamdone.py | 19 +- .../models/transcriptionstreamevents.py | 13 +- .../models/transcriptionstreameventtypes.py | 16 +- .../models/transcriptionstreamlanguage.py | 19 +- .../models/transcriptionstreamsegmentdelta.py | 21 +- .../models/transcriptionstreamtextdelta.py | 19 +- src/mistralai/client/models/usermessage.py | 19 +- .../client/models/wandbintegration.py | 19 +- .../client/models/wandbintegrationout.py | 19 +- .../client/models/websearchpremiumtool.py | 20 +- src/mistralai/client/models/websearchtool.py | 17 +- src/mistralai/client/models_.py | 10 +- src/mistralai/extra/mcp/base.py | 1 - src/mistralai/extra/run/context.py | 20 +- src/mistralai/extra/run/tools.py | 1 - uv.lock | 2 +- 272 files changed, 2960 insertions(+), 2658 deletions(-) create mode 100644 .speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock create mode 100644 docs/models/agentcreationrequesttool.md delete mode 100644 docs/models/agenthandoffdoneeventtype.md delete mode 100644 docs/models/agenthandoffstartedeventtype.md rename docs/models/{queryparamagentversion.md => 
agentsapiv1agentsgetagentversion.md} (79%) rename docs/models/{responsebody.md => agentsapiv1conversationslistresponse.md} (84%) rename docs/models/{instructrequestinputsmessages.md => agentscompletionrequestmessage.md} (92%) rename docs/models/{chatcompletionstreamrequestmessages.md => agentscompletionstreamrequestmessage.md} (90%) delete mode 100644 docs/models/agentscompletionstreamrequestmessages.md rename docs/models/{tools.md => agenttool.md} (98%) rename docs/models/{modelconversationtools.md => agentupdaterequesttool.md} (96%) delete mode 100644 docs/models/audiochunktype.md delete mode 100644 docs/models/basemodelcardtype.md rename docs/models/{finishreason.md => chatcompletionchoicefinishreason.md} (88%) rename docs/models/{one.md => chatcompletionrequestmessage.md} (92%) rename docs/models/{stop.md => chatcompletionrequeststop.md} (90%) rename docs/models/{agentscompletionrequestmessages.md => chatcompletionstreamrequestmessage.md} (91%) delete mode 100644 docs/models/chatmoderationrequestinputs.md rename docs/models/{instructrequestmessages.md => chatmoderationrequestinputs1.md} (92%) create mode 100644 docs/models/chatmoderationrequestinputs2.md create mode 100644 docs/models/chatmoderationrequestinputs3.md rename docs/models/{completiondetailedjoboutintegrations.md => classifierdetailedjoboutintegration.md} (76%) delete mode 100644 docs/models/classifierdetailedjoboutjobtype.md delete mode 100644 docs/models/classifierftmodeloutmodeltype.md rename docs/models/{integrations.md => classifierjoboutintegration.md} (80%) delete mode 100644 docs/models/classifierjoboutjobtype.md delete mode 100644 docs/models/codeinterpretertooltype.md rename docs/models/{classifierdetailedjoboutintegrations.md => completiondetailedjoboutintegration.md} (76%) delete mode 100644 docs/models/completiondetailedjoboutjobtype.md rename docs/models/{completiondetailedjoboutrepositories.md => completiondetailedjoboutrepository.md} (76%) rename docs/models/{classifierjoboutintegrations.md => completionjoboutintegration.md} (80%) rename docs/models/{repositories.md => completionjoboutrepository.md} (81%) rename docs/models/{status.md => completionjoboutstatus.md} (96%) rename docs/models/{agentversion.md => conversationrequestagentversion.md} (80%) rename docs/models/{handoffexecution.md => conversationrequesthandoffexecution.md} (73%) rename docs/models/{agentupdaterequesttools.md => conversationrequesttool.md} (95%) rename docs/models/{agentcreationrequesttools.md => conversationstreamrequesttool.md} (95%) delete mode 100644 docs/models/conversationstreamrequesttools.md rename docs/models/{content.md => deltamessagecontent.md} (89%) delete mode 100644 docs/models/documentlibrarytooltype.md rename docs/models/{librariesdocumentsuploadv1documentupload.md => documentupload.md} (98%) rename docs/models/{entries.md => entry.md} (98%) delete mode 100644 docs/models/ftmodelcardtype.md delete mode 100644 docs/models/functioncalleventtype.md delete mode 100644 docs/models/functiontooltype.md delete mode 100644 docs/models/githubrepositoryintype.md delete mode 100644 docs/models/githubrepositoryouttype.md delete mode 100644 docs/models/imagegenerationtooltype.md rename docs/models/{imageurlchunkimageurl.md => imageurlunion.md} (86%) rename docs/models/{messages.md => inputsmessage.md} (96%) rename docs/models/{two.md => instructrequestmessage.md} (93%) rename docs/models/{jobinintegrations.md => jobinintegration.md} (85%) rename docs/models/{jobinrepositories.md => jobinrepository.md} (86%) rename 
docs/models/{queryparamstatus.md => jobsapiroutesfinetuninggetfinetuningjobsstatus.md} (94%) delete mode 100644 docs/models/jobtype.md rename docs/models/{object.md => messageinputentryobject.md} (75%) delete mode 100644 docs/models/messageoutputeventtype.md rename docs/models/{agenttools.md => modelconversationtool.md} (96%) rename docs/models/{data.md => modellistdata.md} (92%) delete mode 100644 docs/models/modeltype.md rename docs/models/{filesapiroutesuploadfilemultipartbodyparams.md => multipartbodyparams.md} (99%) rename docs/models/{outputs.md => output.md} (97%) rename docs/models/{message.md => realtimetranscriptionerrordetailmessage.md} (81%) rename docs/models/{response1.md => response.md} (94%) delete mode 100644 docs/models/responsedoneeventtype.md delete mode 100644 docs/models/responseerroreventtype.md rename docs/models/{retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md => responseretrievemodelv1modelsmodelidget.md} (75%) delete mode 100644 docs/models/responsestartedeventtype.md rename docs/models/{agentsapiv1conversationsgetresponsev1conversationsget.md => responsev1conversationsget.md} (81%) delete mode 100644 docs/models/role.md delete mode 100644 docs/models/toolexecutiondeltaeventtype.md delete mode 100644 docs/models/toolexecutiondoneeventtype.md rename docs/models/{name.md => toolexecutionentryname.md} (87%) delete mode 100644 docs/models/toolexecutionstartedeventtype.md delete mode 100644 docs/models/toolmessagerole.md rename docs/models/{type.md => transcriptionsegmentchunktype.md} (84%) delete mode 100644 docs/models/transcriptionstreamdonetype.md delete mode 100644 docs/models/transcriptionstreamlanguagetype.md delete mode 100644 docs/models/transcriptionstreamsegmentdeltatype.md delete mode 100644 docs/models/transcriptionstreamtextdeltatype.md delete mode 100644 docs/models/usermessagerole.md delete mode 100644 docs/models/wandbintegrationouttype.md delete mode 100644 docs/models/wandbintegrationtype.md delete mode 100644 docs/models/websearchpremiumtooltype.md delete mode 100644 docs/models/websearchtooltype.md rename docs/sdks/{mistraljobs => batchjobs}/README.md (100%) rename docs/sdks/{mistralagents => betaagents}/README.md (78%) rename docs/sdks/{jobs => finetuningjobs}/README.md (83%) create mode 100644 examples/azure/az_chat_no_streaming.py delete mode 100644 examples/azure/az_chat_no_streaming.py.py create mode 100644 examples/azure/chat_no_streaming.py delete mode 100644 examples/azure/chat_no_streaming.py.py rename src/mistralai/client/{mistral_jobs.py => batch_jobs.py} (99%) rename src/mistralai/client/{mistral_agents.py => beta_agents.py} (99%) rename src/mistralai/client/{jobs.py => fine_tuning_jobs.py} (98%) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 345ea2c8..69828bd7 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -62,7 +62,7 @@ trackedFiles: pristine_git_object: 1810386448a440cfc5f7b8579695b228ae40460d docs/models/agent.md: id: ffdbb4c53c87 - last_write_checksum: sha1:ec6c799658040b3c75d6ae0572bb391c6aea3fd4 + last_write_checksum: sha1:26d2fb743d3fdd54a6ab1258a37f08d1726927ac pristine_git_object: ee054dd349848eff144d7064c319c3c8434bdc6c docs/models/agentaliasresponse.md: id: 5ac4721d8947 @@ -82,20 +82,14 @@ trackedFiles: pristine_git_object: ea7cc75c5197ed42f9fb508a969baa16effe1f98 docs/models/agentcreationrequest.md: id: 697a770fe5c0 - last_write_checksum: sha1:c8221a20a68675b444d668a58a649b25b54786e9 + last_write_checksum: sha1:d77c75f922c64df266b101a2fd23c7fe56b7894b 
     pristine_git_object: afc27d3b688f9ca187606243c810fd19d12bb840
-  docs/models/agentcreationrequesttools.md:
-    id: 932bf99a19a8
-    last_write_checksum: sha1:49294bdd30b7413956bd8dc039ad7c9d15243282
-    pristine_git_object: c2525850649b4dad76b44fd21cac822e12986818
+  docs/models/agentcreationrequesttool.md:
+    last_write_checksum: sha1:310d4b107554a9c16143191fdc306a5438b63768
   docs/models/agenthandoffdoneevent.md:
     id: dcf166a3c3b0
-    last_write_checksum: sha1:281473cbc3929e2deb3e069e74551e7e26b4fdba
+    last_write_checksum: sha1:9e95c09f724827f5e9c202fd634bdfa2baef1b6e
     pristine_git_object: c0039f41825e3667cd8e91adae5bb78a2e3ac8ae
-  docs/models/agenthandoffdoneeventtype.md:
-    id: 4d412ea3af67
-    last_write_checksum: sha1:720ebe2c6029611b8ecd4caa1b5a58d6417251c6
-    pristine_git_object: c864ce4381eb30532feb010b39b991a2070f134b
   docs/models/agenthandoffentry.md:
     id: 39d54f489b84
     last_write_checksum: sha1:7d949e750fd24dea20cabae340f9204d8f756008
@@ -110,12 +104,8 @@ trackedFiles:
     pristine_git_object: 527ebceb2ff1bbba1067f30438befd5e2c2e91d6
   docs/models/agenthandoffstartedevent.md:
     id: b620102af460
-    last_write_checksum: sha1:a635a7f57e197519d6c51349f6db44199f8e0d43
+    last_write_checksum: sha1:33732e0465423348c2ace458506a597a3dadf9b2
     pristine_git_object: 035cd02aaf338785d9f6410fde248591c5ffa5f7
-  docs/models/agenthandoffstartedeventtype.md:
-    id: 09b09b971d58
-    last_write_checksum: sha1:a3cf06d2c414b1609bdbbbd9e35c8d3f14af262a
-    pristine_git_object: 4ffaff15cd7b5d4b08080c4fb78e92c455c73f35
   docs/models/agentobject.md:
     id: ed24a6d647a0
     last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d
@@ -128,9 +118,11 @@ trackedFiles:
     id: 0faaaa59add9
     last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2
     pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c
+  docs/models/agentsapiv1agentsgetagentversion.md:
+    last_write_checksum: sha1:e4f4c6a64b1c2ec9465b7ad008df4d7859098e59
   docs/models/agentsapiv1agentsgetrequest.md:
     id: 01740ae62cff
-    last_write_checksum: sha1:9c4f6d88f29c39238757547da605ecb7106e76c2
+    last_write_checksum: sha1:bc86e90289ec09b40212083a82455b4fe71c7194
     pristine_git_object: c71d4419afd3b51713e154b8021d4fe2b49d8af5
   docs/models/agentsapiv1agentsgetversionrequest.md:
     id: 88ed22b85cde
@@ -172,10 +164,6 @@ trackedFiles:
     id: d6acce23f92c
     last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf
     pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e
-  docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md:
-    id: 97b0d4a71cbc
-    last_write_checksum: sha1:8d3df6d122eeb58043c81e30cfa701526cc572f0
-    pristine_git_object: 4bc836f353f66b0f8b24f278cc78d41dbec72e36
   docs/models/agentsapiv1conversationshistoryrequest.md:
     id: e3efc36ea8b5
     last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be
@@ -184,6 +172,8 @@ trackedFiles:
     id: 406c3e92777a
     last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194
     pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4
+  docs/models/agentsapiv1conversationslistresponse.md:
+    last_write_checksum: sha1:1144f41f8a97daacfb75c11fdf3575e553cf0859
   docs/models/agentsapiv1conversationsmessagesrequest.md:
     id: 2c749c6620d4
     last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2
@@ -198,12 +188,10 @@ trackedFiles:
     pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d
   docs/models/agentscompletionrequest.md:
     id: 906b82c214dc
-    last_write_checksum: sha1:60a969d5e54cbbb8e9296380908f1d31544e80e2
+    last_write_checksum: sha1:84ee0378e413830260a279a67fc3b1342e643328
     pristine_git_object: 2a0c4144fb5919e5ce892db1210bde90820c127c
-  docs/models/agentscompletionrequestmessages.md:
-    id: 152837715a56
-    last_write_checksum: sha1:338b094596f610c6eacaf0995c585f371f628f0d
-    pristine_git_object: d6a1e69106fc4b4804bfcc0f95e30782be40b363
+  docs/models/agentscompletionrequestmessage.md:
+    last_write_checksum: sha1:ecf7b7cdf0d24a5e97b520366cf816b8731734bb
   docs/models/agentscompletionrequeststop.md:
     id: ad1e0e74b6b8
     last_write_checksum: sha1:b2422d4dada80d54b2dd499a6659a3894318d2c9
@@ -214,12 +202,10 @@
     pristine_git_object: 63b9dca9fbb8d829f93d8327a77fbc385a846c76
   docs/models/agentscompletionstreamrequest.md:
     id: 21d09756447b
-    last_write_checksum: sha1:97372c5a10b06f826b9da6bde2b9c5f6984cc15b
+    last_write_checksum: sha1:0c88bc63255733480b65b61685dcc356fcc9ed66
     pristine_git_object: b2ccd4e8fe2fc3f63d4b517f7ecfc21f3aef9d67
-  docs/models/agentscompletionstreamrequestmessages.md:
-    id: d527345f99b1
-    last_write_checksum: sha1:a5e00a940960bd6751586b92329aea797af50550
-    pristine_git_object: 1bc736af55a3582a18959e445f10fc75f050476b
+  docs/models/agentscompletionstreamrequestmessage.md:
+    last_write_checksum: sha1:98744c9646969250242cbbfbdf428dbd7030e4bb
   docs/models/agentscompletionstreamrequeststop.md:
     id: 4925b6b8fbca
     last_write_checksum: sha1:c9d0d73ca46643ffdf02e6c6cd35de5c39460c20
@@ -228,22 +214,14 @@
     id: b1f76f7a4e1c
     last_write_checksum: sha1:843c4946d5cab61df2cba458af40835c4e8bcafe
     pristine_git_object: 4354523a7d0d21721a96e91938b89236169ccced
-  docs/models/agenttools.md:
-    id: 493997aabfdb
-    last_write_checksum: sha1:90e3537a61b4120892a3aafe545d6bed937bf46a
-    pristine_git_object: 15891f566b3430e1f199da332f4531dd29002bed
+  docs/models/agenttool.md:
+    last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7
   docs/models/agentupdaterequest.md:
     id: 75a7f820b906
-    last_write_checksum: sha1:d282d1cd39ecb3c447e651a9ea25010ecfa519f7
+    last_write_checksum: sha1:306134659876c4e87324dfec879ab0b691a74f3a
     pristine_git_object: 641d1e406f0fba0fce9f10c16a15f883c7095c07
-  docs/models/agentupdaterequesttools.md:
-    id: a39223b88fc9
-    last_write_checksum: sha1:925ef5852c2031c9bf2608577e55edbc36708730
-    pristine_git_object: 1752ee6861d23c6abaa6b748f4ff43e9545505ec
-  docs/models/agentversion.md:
-    id: b0aa02d6c085
-    last_write_checksum: sha1:f6fcf351de43eed5345f88f5cb6a2bf928a594d9
-    pristine_git_object: fd4b6a3ea4ade6c9f62594b377c8e791a50211e7
+  docs/models/agentupdaterequesttool.md:
+    last_write_checksum: sha1:25d8a331a706bf8e6056b99f8ff1a46abff6ae72
   docs/models/apiendpoint.md:
     id: be613fd9b947
     last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4
@@ -278,12 +256,8 @@ trackedFiles:
     pristine_git_object: 147708d9238e40e1cdb222beee15fbe8c1603050
   docs/models/audiochunk.md:
     id: 88315a758fd4
-    last_write_checksum: sha1:deae67e30f57eb9ae100d8c3bc26f77e8fb28396
+    last_write_checksum: sha1:d52e493765280fc0b1df61a0ce1086205965c712
     pristine_git_object: c443e7ade726ba88dd7ce9a8341687ef38abe598
-  docs/models/audiochunktype.md:
-    id: cfdd0b7a74b3
-    last_write_checksum: sha1:aaafb6be2f880e23fc29958389c44fd60e85f5e4
-    pristine_git_object: 46ebf3729db50fd915e56124adcf63a09d93dbf4
  docs/models/audioencoding.md:
     id: 1e0dfee9c2a0
     last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478
@@ -302,12 +276,8 @@ trackedFiles:
     pristine_git_object: 5d64964d1a635da912f2553c306fb8654ebfca2e
   docs/models/basemodelcard.md:
     id: 2f62bfbd650e
-    last_write_checksum: sha1:7ee94bd9ceb6af84024863aa8183540bee7ffcce
+    last_write_checksum: sha1:4b29e0d24060b6724e82aeee05befe1cddb316f4
     pristine_git_object: 58ad5e25131804287b5f7c834afc3ad480d065a9
-  docs/models/basemodelcardtype.md:
-    id: ac404098e2ff
-    last_write_checksum: sha1:b20b34e9a5f2f52d0563d8fbfa3d00042817ce87
-    pristine_git_object: 4a40ce76799b5c224c5687287e8fc14857999d85
   docs/models/batcherror.md:
     id: 8053e29a3f26
     last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f
@@ -350,12 +320,18 @@ trackedFiles:
     pristine_git_object: 910d62ae20fc67e9a3200397aeab95513bfed90f
   docs/models/chatcompletionchoice.md:
     id: 0d15c59ab501
-    last_write_checksum: sha1:449b3e772891ec8d2ef77b6959a437514bb48d9c
+    last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564
     pristine_git_object: d77d286eb0b2d2b018b6ff5f9617225be4fa9fa5
+  docs/models/chatcompletionchoicefinishreason.md:
+    last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284
   docs/models/chatcompletionrequest.md:
     id: adffe90369d0
-    last_write_checksum: sha1:7dce1fcd0918e2c94ad90337fb7a89179a5b8402
+    last_write_checksum: sha1:f6eec11c908ee6581e508fff98e785441c4b84ad
     pristine_git_object: 109fa7b13d19ccc85e4633e64b44613640c171fb
+  docs/models/chatcompletionrequestmessage.md:
+    last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607
+  docs/models/chatcompletionrequeststop.md:
+    last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851
   docs/models/chatcompletionrequesttoolchoice.md:
     id: b97041b2f15b
     last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861
@@ -366,12 +342,10 @@ trackedFiles:
     pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b
   docs/models/chatcompletionstreamrequest.md:
     id: cf8f29558a68
-    last_write_checksum: sha1:6f3ca8df1ce48dceb72547012a3e973e09a16d61
+    last_write_checksum: sha1:7ed921e0366c1b00225c05e60937fb8d228f027b
     pristine_git_object: 7d5fb411bde92e39910018cc2ad8d4d67ea980a1
-  docs/models/chatcompletionstreamrequestmessages.md:
-    id: b343649e1a58
-    last_write_checksum: sha1:04ea9c0e1abcc1956a5990847027bbbbcc778620
-    pristine_git_object: 479906112d167c909301c1835df549f4a6456f95
+  docs/models/chatcompletionstreamrequestmessage.md:
+    last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5
   docs/models/chatcompletionstreamrequeststop.md:
     id: d0e89a4dca78
     last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41
@@ -382,12 +356,14 @@ trackedFiles:
     pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8
   docs/models/chatmoderationrequest.md:
     id: 22862d4d20ec
-    last_write_checksum: sha1:2fb708270756e1296a063b0d12252e7a5b2fb92a
+    last_write_checksum: sha1:9bbe510ee67515092bd953ad7f84ae118398af54
     pristine_git_object: 69b6c1dc2c10abbbc2574f3782b2d85687661f11
-  docs/models/chatmoderationrequestinputs.md:
-    id: 6d7386a07f09
-    last_write_checksum: sha1:f95cffb7d88cfa238a483c949af2d386f875def2
-    pristine_git_object: cf775d609e5d308ffb041deed7a70ae3f7fd70a7
+  docs/models/chatmoderationrequestinputs1.md:
+    last_write_checksum: sha1:8d4c2dbd9207589aabf9c00cf60c61d2d3eef452
+  docs/models/chatmoderationrequestinputs2.md:
+    last_write_checksum: sha1:e34eb6557e06e7783ed14d959c2a29959c26fd4c
+  docs/models/chatmoderationrequestinputs3.md:
+    last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7
   docs/models/checkpointout.md:
     id: 909ce66e1f65
     last_write_checksum: sha1:89e678d55b97353ad1c3b28d9f1ab101f6be0928
@@ -410,16 +386,10 @@ trackedFiles:
     pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1
   docs/models/classifierdetailedjobout.md:
     id: a2084ba5cc8c
-    last_write_checksum: sha1:75fec933eb83e28b81aa69561d7aaf0fb79b869b
+    last_write_checksum: sha1:63acd8a1921ac99143685722f8812b1f572d451f
     pristine_git_object: ccc88f89ed81e6e879a88b9729c4945704370fd9
-  docs/models/classifierdetailedjoboutintegrations.md:
-    id: 3c607522e70d
-    last_write_checksum: sha1:e483390fb183bd1960373e4613a15ab31a52b7c7
-    pristine_git_object: 5a09465ece564b1bf4dd323918a20f6747019cac
-  docs/models/classifierdetailedjoboutjobtype.md:
-    id: 176bd257be82
-    last_write_checksum: sha1:ad0f41bac94d711d2b51b2ec4e09d0155db2b6eb
-    pristine_git_object: 0d1c6573b925e0ef836f5a607ac24f801e0d72eb
+  docs/models/classifierdetailedjoboutintegration.md:
+    last_write_checksum: sha1:6b2691766c1795d17b1572076a693eb377c5307f
   docs/models/classifierdetailedjoboutobject.md:
     id: 1ca54621f5bf
     last_write_checksum: sha1:5ae3d2847a66487d70bc2ff97a8c31bbbba191c7
@@ -430,28 +400,18 @@ trackedFiles:
     pristine_git_object: c3118aafa8614f20c9adf331033e7822b6391752
   docs/models/classifierftmodelout.md:
     id: 268ac482c38b
-    last_write_checksum: sha1:77ff5ad1a9c142de2a43939be9cd3f57038a9bfc
+    last_write_checksum: sha1:dda3d6bf88fb6a3e860821aefb8a522d8a476b1d
     pristine_git_object: dd9e8bf9c0ee291b44cd4f06146dea3d3280c143
-  docs/models/classifierftmodeloutmodeltype.md:
-    id: 40536012f45c
-    last_write_checksum: sha1:c6fde7ce8542ba6a56a91584aa0d6b1eb99fde6d
-    pristine_git_object: e1e7e465378c4c0112f08dc140052fad7955995e
   docs/models/classifierftmodeloutobject.md:
     id: 6aa25d9fe076
     last_write_checksum: sha1:5a5fe345b3a2b3e65ce3171e8d6e9b9493ec7b06
     pristine_git_object: 9fe05bcf42325a390e5c984c7bdf346668944928
   docs/models/classifierjobout.md:
     id: 2e3498af3f8c
-    last_write_checksum: sha1:a9706e8df1a0a569e5e42e7a1494737e391cb55a
+    last_write_checksum: sha1:311f6ca4b6b625768c4ddd63e642e14e6a58df23
     pristine_git_object: aa1d3ca910535e283059903a2c39331673c1982b
-  docs/models/classifierjoboutintegrations.md:
-    id: 3c4aff0af3fd
-    last_write_checksum: sha1:b843cb1635940ff74737f92ec1ac5da893a239f2
-    pristine_git_object: d938d0b991f71e46096a9b12320c6237265bd811
-  docs/models/classifierjoboutjobtype.md:
-    id: 772280dfaefc
-    last_write_checksum: sha1:b809726c9edd5a47be7582eb028acbd58014b565
-    pristine_git_object: 7f5236fa87ea9bb5fd93873a2d2f9a6a8c4f9456
+  docs/models/classifierjoboutintegration.md:
+    last_write_checksum: sha1:72dfda442a88f977f3480c95127534a600362806
   docs/models/classifierjoboutobject.md:
     id: 04543f046d40
     last_write_checksum: sha1:96863c621ddf0425b818edcd5da32ddbd5fd1194
@@ -478,12 +438,8 @@ trackedFiles:
     pristine_git_object: 1287c973fae9762310597fbeceaef26865ace04f
   docs/models/codeinterpretertool.md:
     id: f009740c6e54
-    last_write_checksum: sha1:bba7c0b8f0979b0c77a31c70621dccb03d6722a5
+    last_write_checksum: sha1:bce278ce22703246613254ee2dac57f8b14e8060
     pristine_git_object: d5ad789ed012accaa105ced4f8dfd8e9eb83d4a3
-  docs/models/codeinterpretertooltype.md:
-    id: d6d0f83de515
-    last_write_checksum: sha1:f41ae23451c22692410340d44bcec36a1f45910b
-    pristine_git_object: f704b65e2842e36be4d2b96c9334cda4a6b02cde
   docs/models/completionargs.md:
     id: 3b54534f9830
     last_write_checksum: sha1:c0368b7c21524228939b2093ff1a4524eb57aeb7
@@ -498,24 +454,16 @@ trackedFiles:
     pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35
   docs/models/completiondetailedjobout.md:
     id: 634ca7241abd
-    last_write_checksum: sha1:b0af22a4e5eb409d6aa2a91c4ee3924d38923f5f
+    last_write_checksum: sha1:e5edf096998b6b8e2048f354bd694288dd609875
     pristine_git_object: 84613080715078a73204d3984e7f97477ef548ae
-  docs/models/completiondetailedjoboutintegrations.md:
-    id: ecf47529e409
-    last_write_checksum: sha1:5ff41070f932c911a724867a91a0a26c1d62032e
-    pristine_git_object: af6bbcc5f43176df2dea01a4a1a31f3c616ee3b9
-  docs/models/completiondetailedjoboutjobtype.md:
-    id: cb794f29a3f2
-    last_write_checksum: sha1:24533bc2a5bb42b560f02af4d93f008f9e5b7873
-    pristine_git_object: fb24db0cc3d9495f01732bdb0e1c3df8a5865540
+  docs/models/completiondetailedjoboutintegration.md:
+    last_write_checksum: sha1:3317db3f71962623a6144e3de0db20b4abfd5b9b
   docs/models/completiondetailedjoboutobject.md:
     id: 8e418065aa1c
     last_write_checksum: sha1:d429d772a6a4249809bbf0c26a6547e5f2de3f11
     pristine_git_object: 1bec88e5f4c5f082c53157b8ee95b4b05cb787e3
-  docs/models/completiondetailedjoboutrepositories.md:
-    id: bb83e77df490
-    last_write_checksum: sha1:dc2d60c6be1d3385d584ce9629abaaaaa46cf0ef
-    pristine_git_object: 4f9727c36fac5515d0afbc801904abc3652a5b20
+  docs/models/completiondetailedjoboutrepository.md:
+    last_write_checksum: sha1:b1910efc6cd1e50391cd33daef004441bac3d3cd
   docs/models/completiondetailedjoboutstatus.md:
     id: c606d38452e2
     last_write_checksum: sha1:1e9a5736de32a44cf539f7eaf8214aad72ec4994
@@ -526,7 +474,7 @@ trackedFiles:
     pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d
   docs/models/completionftmodelout.md:
     id: 93fed66a5794
-    last_write_checksum: sha1:c66aecd2e10f79c84c057eeae1986e975cb40220
+    last_write_checksum: sha1:17c4ed9718d6556ddb103cff5a5823c3baa18f41
     pristine_git_object: cd0858258521ced3990ff393fd00c11ef0abe094
   docs/models/completionftmodeloutobject.md:
     id: c6e5667c5f03
@@ -534,12 +482,18 @@ trackedFiles:
     pristine_git_object: 6f9d858caa563f4a25ae752dd40ba632ecd0af75
   docs/models/completionjobout.md:
     id: 77315b024171
-    last_write_checksum: sha1:bae2f49bb9064e24f886487e44ce1688993fa949
+    last_write_checksum: sha1:1070ddeaef67a65f27a365a57d343a83b4b40aca
     pristine_git_object: cb471746c4f23d2ec8451f4c45bf57e2f001072f
+  docs/models/completionjoboutintegration.md:
+    last_write_checksum: sha1:59711a3fa46d6a4bff787a61c81ecc34bdaaec2e
   docs/models/completionjoboutobject.md:
     id: 922a1e3a4e33
     last_write_checksum: sha1:020211def2c4cd969398cf009b187ca19bd7a943
     pristine_git_object: 712b107d79a8c60c4330da4f3af307545bf1a7ec
+  docs/models/completionjoboutrepository.md:
+    last_write_checksum: sha1:2cb5b23640eeaf87f45dc9f180247ed7a6307df7
+  docs/models/completionjoboutstatus.md:
+    last_write_checksum: sha1:b8f33134c63b12dc474e7714b1ac19d768a3cbbd
   docs/models/completionresponsestreamchoice.md:
     id: d56824d615a6
     last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872
@@ -556,10 +510,6 @@ trackedFiles:
     id: 7223a57004ab
     last_write_checksum: sha1:8f77e5fe2ce149115b0bda372c57fafa931abd90
     pristine_git_object: 9fcc714e5f000e6134f7f03f1dd4f56956323385
-  docs/models/content.md:
-    id: bfd859c99f86
-    last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8
-    pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6
   docs/models/contentchunk.md:
     id: d2d3a32080cd
     last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c
@@ -590,7 +540,7 @@ trackedFiles:
     pristine_git_object: 5452d7d5ce2aa59a6d89c7b7363290e91ed8a0a3
   docs/models/conversationhistory.md:
     id: 7e97e8e6d6e9
-    last_write_checksum: sha1:cc6b40d6e6ff923555e959be5ef50a00c73154a7
+    last_write_checksum: sha1:06df76a87aca7c5acd5a28ca3306be09a8bb541b
     pristine_git_object: ebb1d5136cebf2bc9b77047fe83feecc68532d03
   docs/models/conversationhistoryobject.md:
     id: 088f7df6b658
@@ -610,11 +560,17 @@ trackedFiles:
     pristine_git_object: db3a441bde0d086bccda4814ddfbf737539681a6
   docs/models/conversationrequest.md:
     id: dd7f4d6807f2
-    last_write_checksum: sha1:33dec32dbf20979ac04763e99a82e90ee474fef4
+    last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c
     pristine_git_object: 2b4ff8ef3398561d9b3e192a51ec22f64880389c
+  docs/models/conversationrequestagentversion.md:
+    last_write_checksum: sha1:fd2e9cd7ed2499b5843c592505ec5e0596a50b33
+  docs/models/conversationrequesthandoffexecution.md:
+    last_write_checksum: sha1:f7df210a46acf24abb1312123aebe9e595a190e8
+  docs/models/conversationrequesttool.md:
+    last_write_checksum: sha1:69d503d73f5bd044882d13cd0c7de188dd5f4831
   docs/models/conversationresponse.md:
     id: 2eccf42d48af
-    last_write_checksum: sha1:69059d02d5354897d23c9d9654d38a85c7e0afc6
+    last_write_checksum: sha1:17ebabdf1dd191eeac442046511c44120dfa97a1
     pristine_git_object: 38cdadd0055d457fa371984eabcba7782e130839
   docs/models/conversationresponseobject.md:
     id: 6c028b455297
@@ -646,7 +602,7 @@ trackedFiles:
     pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd
   docs/models/conversationstreamrequest.md:
     id: 833f266c4f96
-    last_write_checksum: sha1:8d7400dcdb9525c2e45bdaa495df6ca7dcf7f992
+    last_write_checksum: sha1:5cb58852d393eb6cc504b45d8b238fc2f3eecd2a
     pristine_git_object: 299346f8aaa8ccddcbf7fd083389b74346ef2d4f
   docs/models/conversationstreamrequestagentversion.md:
     id: e99ccc842929
@@ -656,18 +612,12 @@ trackedFiles:
     id: e6701e5f9f0c
     last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50
     pristine_git_object: c98e194c1d204c3a5d4234f0553712a7025d7f85
-  docs/models/conversationstreamrequesttools.md:
-    id: 83ea0526da4e
-    last_write_checksum: sha1:c445fc14cbb882871a83990943569bdf09a662f9
-    pristine_git_object: 700c844876754e85428898f6cabda8fb0dedf114
+  docs/models/conversationstreamrequesttool.md:
+    last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc
   docs/models/conversationusageinfo.md:
     id: 57ef89d3ab83
     last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225
     pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0
-  docs/models/data.md:
-    id: 9a31987caf78
-    last_write_checksum: sha1:da040f995f799c04214eff92982dd8d6c057ae93
-    pristine_git_object: 95dc8d28aa4669513ae0f255c81aadaf3d793370
   docs/models/deletefileout.md:
     id: c7b84242a45c
     last_write_checksum: sha1:f2b039ab88fc83ec5dd765cab8e2ed8cce7e417d
@@ -682,20 +632,18 @@ trackedFiles:
     pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4
   docs/models/deltamessage.md:
     id: 6c5ed6b60968
-    last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58
+    last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74
     pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9
+  docs/models/deltamessagecontent.md:
+    last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e
   docs/models/document.md:
     id: cd1d2a444370
     last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52
     pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40
   docs/models/documentlibrarytool.md:
     id: 68083b0ef8f3
-    last_write_checksum: sha1:5f21be0a248ff4dedc26908b9ee0039d7ac1421c
+    last_write_checksum: sha1:470b969fa4983c0e7ad3d513b4b7a4fa8d5f0f41
     pristine_git_object: 82315f32b920d32741b2e53bc10e411f74a85602
-  docs/models/documentlibrarytooltype.md:
-    id: 23c5ba5c4b3f
-    last_write_checksum: sha1:bcb58941aafaca2b8ad6e71425d5f16e881b4f97
-    pristine_git_object: ebd420f69a4ace05daa7edd82b9315b2a4354b5f
   docs/models/documentout.md:
     id: a69fd1f47711
     last_write_checksum: sha1:ed446078e7194a0e44e21ab1af958d6a83597edb
@@ -708,6 +656,8 @@ trackedFiles:
     id: 185ab27259a7
     last_write_checksum: sha1:e0faccd04229204968dbc4e8131ee72f81288182
     pristine_git_object: 0993886d56868aba6844824f0e0fdf1bdb9d74f6
+  docs/models/documentupload.md:
+    last_write_checksum: sha1:aea0f81009be09b153019abbc01b2918a1ecc1f9
   docs/models/documenturlchunk.md:
     id: 48437d297408
     last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54
@@ -744,10 +694,8 @@ trackedFiles:
     id: 130a2f7038b0
     last_write_checksum: sha1:01c3c10e737bcd58be70b437f7ee74632972a983
     pristine_git_object: 7c040b382d4c1b6bc63f582566d938be75a5f954
-  docs/models/entries.md:
-    id: 93dc7a28346c
-    last_write_checksum: sha1:c6c61c922df17562e9ca5d8d2d325579db5c88bc
-    pristine_git_object: 8e5a20d052c47008b8a399b7fb740bece3b35386
+  docs/models/entry.md:
+    last_write_checksum: sha1:4971db390327db09f88feff5d2b8a0b1e6c5b933
   docs/models/eventout.md:
     id: 9960732c3718
     last_write_checksum: sha1:dbc23814b2e54ded4aa014d63510b3a2a3259329
@@ -784,10 +732,6 @@ trackedFiles:
     id: 2783bfd9c4b9
     last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab
     pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b
-  docs/models/filesapiroutesuploadfilemultipartbodyparams.md:
-    id: 558bf53c7b65
-    last_write_checksum: sha1:de3f26e8bd89aae0e2c2078b9e1f7f47adccafbd
-    pristine_git_object: a5dd1174ab987e511d70a0f8fdaefbeaeda18c43
   docs/models/fileschema.md:
     id: 9a05a660399d
     last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197
@@ -820,10 +764,6 @@ trackedFiles:
     id: e16926b57814
     last_write_checksum: sha1:52006811b756ff5af865ed6f74838d0903f0ee52
     pristine_git_object: 34b24bd4db1ad3f9e77e2c6a45a41d2fbc5cf7fd
-  docs/models/finishreason.md:
-    id: 73315c2a39b3
-    last_write_checksum: sha1:dc258e82af5babd6efabadb20cd6e2f9663dbb64
-    pristine_git_object: 2af53f6e55b74455a696c17ab00ba626a1c3711f
   docs/models/format_.md:
     id: a17c22228eda
     last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4
@@ -838,12 +778,8 @@ trackedFiles:
     pristine_git_object: 19690476c64ac7be53f974347c1618730f0013ce
   docs/models/ftmodelcard.md:
     id: 15ed6f94deea
-    last_write_checksum: sha1:2dccc70020274152bb8a76f0f7699694f8683652
+    last_write_checksum: sha1:1c560ceaaacc1d109b2997c36de03192dfcda941
     pristine_git_object: 35032775db8ae6f4c6fbac309edacd27ee7868af
-  docs/models/ftmodelcardtype.md:
-    id: e2ba85c02d1c
-    last_write_checksum: sha1:f6a718013be6a8cb340f58f1ff7b919217594622
-    pristine_git_object: 0b38470b9222df6c51baef2e7e9e10c0156a2e05
   docs/models/function.md:
     id: 416a80fba031
     last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511
@@ -870,12 +806,8 @@ trackedFiles:
     pristine_git_object: 7ea34c5206bdf205d74d8d49c87ddee5607582e9
   docs/models/functioncallevent.md:
     id: cc9f2e603464
-    last_write_checksum: sha1:c3a6a7ce8af38d7ba7a2ece48c352eed95edc578
+    last_write_checksum: sha1:942d1bed0778ba4738993fcdbefe080934b641d5
     pristine_git_object: c25679a5d89745c1e186cdeb72fda490b2f45af2
-  docs/models/functioncalleventtype.md:
-    id: 1aab7a86c5d6
-    last_write_checksum: sha1:61d480f424df9a74a615be673cae4dcaf7875d81
-    pristine_git_object: 8cf3f03866d72ac710015eec57d6b9caa079022e
   docs/models/functionname.md:
     id: 4b3bd62c0f26
     last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb
@@ -894,32 +826,16 @@ trackedFiles:
     pristine_git_object: 35c94d8e553e1cb641bef28fec2d8b3576d142f6
   docs/models/functiontool.md:
     id: 5fb499088cdf
-    last_write_checksum: sha1:f616c6de97a6e0d622b16b99f95c2c5a94661789
+    last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d
     pristine_git_object: 8c42459304100777cf85416a5c3a984bc0e7a7ca
-  docs/models/functiontooltype.md:
-    id: bc0bcbe69ad9
-    last_write_checksum: sha1:c0fae17a8e5a9b7240ff16af7eef9fb4782fe983
-    pristine_git_object: 9c095625b60f1e2e0fd09b08e3ba315545d6a036
   docs/models/githubrepositoryin.md:
     id: b42209ef8423
-    last_write_checksum: sha1:fece86cdee3ba3a5719244a953193ed2f7b982f7
+    last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23
     pristine_git_object: 1584152ba934756793d5228d5691c07d3256c7b8
-  docs/models/githubrepositoryintype.md:
-    id: e2f2ca622221
-    last_write_checksum: sha1:349dc9c6e4db5ec5394c8649c3b872db3545c182
-    pristine_git_object: 63da967cb7a75ec328f9b9fbd1062e43f2cabc07
   docs/models/githubrepositoryout.md:
     id: 0ca86e122722
-    last_write_checksum: sha1:f6ffda992af75d3f95751106db1b0f0c82a2eca7
+    last_write_checksum: sha1:0e3999cef8a745ae24ac36907b3431bc5103ea6f
     pristine_git_object: 03f0b2661e46b48489ede1208d9c38c4324b2b35
-  docs/models/githubrepositoryouttype.md:
-    id: f3ab58fa1b0e
-    last_write_checksum: sha1:8f26cd692f499279b9c4182010d56c75374ed9ec
-    pristine_git_object: 46c3eefd1d67ea6968a3c7025e6dc27e8f0f1ac5
-  docs/models/handoffexecution.md:
-    id: d0b2e094fa39
-    last_write_checksum: sha1:1d8fafc8105b6c15e50620353c0457b629951804
-    pristine_git_object: 61e7dade49090096a49d99b5c8291f629fd43c4e
   docs/models/httpvalidationerror.md:
     id: a211c095f2ac
     last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e
@@ -930,28 +846,22 @@ trackedFiles:
     pristine_git_object: 46a6dd6baa1b1574bad5eadc1e83d4b72d56c0c8
   docs/models/imagegenerationtool.md:
     id: d5deb6b06d28
-    last_write_checksum: sha1:8596d0119712e68b1deafd18860ed6ed452a31fa
+    last_write_checksum: sha1:b3decee8fe7a824401f9afbd3544a69ccde4ef8e
     pristine_git_object: b8fc9cf40c8cb010231837ffe3d66cb3762dd666
-  docs/models/imagegenerationtooltype.md:
-    id: fc670aabaff7
-    last_write_checksum: sha1:234109f99f467905e6e7b74036e2c395090840e4
-    pristine_git_object: 29681b58e1afe945faa76f9dd424deb01cdfb1bd
   docs/models/imageurl.md:
     id: e75dd23cec1d
     last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63
     pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8
   docs/models/imageurlchunk.md:
     id: 4407097bfff3
-    last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727
+    last_write_checksum: sha1:73e14a0beccfc9465ee6d2990462e609903f5cd5
     pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789
-  docs/models/imageurlchunkimageurl.md:
-    id: c7fae88454ce
-    last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325
-    pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064
   docs/models/imageurlchunktype.md:
     id: b9af2db9ff60
     last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85
     pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e
+  docs/models/imageurlunion.md:
+    last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15
   docs/models/inputentries.md:
     id: a5c647d5ad90
     last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53
@@ -960,38 +870,26 @@ trackedFiles:
     id: 4b0a7fb87af8
     last_write_checksum: sha1:19d8da9624030a47a3285276c5893a0fc7609435
     pristine_git_object: 0f62a7ce8e965d0879507e98f808b9eb254282a6
+  docs/models/inputsmessage.md:
+    last_write_checksum: sha1:92a95c1757e33603d1aa9ed6c9912d1c551d9974
   docs/models/instructrequest.md:
     id: a0034d7349a2
-    last_write_checksum: sha1:91c446be8428efd44163ed8366a37c376554211a
+    last_write_checksum: sha1:34a81411110cbb7a099c45e482f5d1702ae48fd3
     pristine_git_object: 9500cb588b5d27d934b04cc5fa0be26a270f6d82
   docs/models/instructrequestinputs.md:
     id: 2a677880e32a
-    last_write_checksum: sha1:1b989ef7ef4c84f59c83af11b3243d934c85e348
+    last_write_checksum: sha1:64bcc6371d70446da60f167682504568d7f2618c
     pristine_git_object: 4caa028f85be2324966e61321c917cbd0c65de01
-  docs/models/instructrequestinputsmessages.md:
-    id: c0cb1f866e69
-    last_write_checksum: sha1:558f78fafbd44c5ea7030491a39d0c7ccd994d01
-    pristine_git_object: 237e131f1b1161c8b90df11d49739f5bfe9ee829
-  docs/models/instructrequestmessages.md:
-    id: 639538e7d70d
-    last_write_checksum: sha1:8c26b3b97f095e5c525b0e3c18d45aded9bd03a2
-    pristine_git_object: 9c866a7db86b40e997cb3f06d68e67eb033f3360
-  docs/models/integrations.md:
-    id: f9eb2b4df2f8
-    last_write_checksum: sha1:e0b12cf5661d4e6332da28913c5394e5a85071bf
-    pristine_git_object: 35214d63ef2b902aa39bfdd2fd6dc5f319cc203b
+  docs/models/instructrequestmessage.md:
+    last_write_checksum: sha1:551b5d6dd3ba0b39cad32478213a9eb7549f0023
   docs/models/jobin.md:
     id: 1b7b37214fa8
-    last_write_checksum: sha1:6dadb7d78e2dc04966bd041ddb54428108098f76
+    last_write_checksum: sha1:16436f5d3222b89d604cf326bde749d9e6f9da39
     pristine_git_object: b96517705cea7b9efd266f146080ad1aed3cc8cb
-  docs/models/jobinintegrations.md:
-    id: 5f293420eced
-    last_write_checksum: sha1:288931c5427e1a435b1396e131e95a43cbcbc2b9
-    pristine_git_object: 91c102426d05b4f88ca5a661f53f1acf316b5b88
-  docs/models/jobinrepositories.md:
-    id: 5c94c2d28ce8
-    last_write_checksum: sha1:e7fbe667fa5703dedd78672d936f1b02caf301b5
-    pristine_git_object: b94477af4c51c7939fd6dcdb75cbc56459d4a30a
+  docs/models/jobinintegration.md:
+    last_write_checksum: sha1:c9887897357e01e6e228b48d6bf0c3fb4edd29f7
+  docs/models/jobinrepository.md:
+    last_write_checksum: sha1:1773f59546b94688d0be16d3f5f014cd86f5b1d7
   docs/models/jobmetadataout.md:
     id: 30eb634fe247
     last_write_checksum: sha1:46d54b6f6004a6e571afd5207db5170dfbce7081
@@ -1022,7 +920,7 @@ trackedFiles:
     pristine_git_object: 1b331662b17cd24c22e88b01bf00d042cb658516
   docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md:
     id: 8aa8030f26d7
-    last_write_checksum: sha1:ebc6ac03e99d69fed1bae6cb4e858e0aecf2dd88
+    last_write_checksum: sha1:619bb7677fa549f5089fde98f3a00ab1d939f80d
     pristine_git_object: eeddc3cdfdd975cdb69fbfcd306e9445010eb82f
   docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md:
     id: a9b75762e534
@@ -1034,8 +932,10 @@ trackedFiles:
     pristine_git_object: e0d2e3610ce460d834c2d07d9a34b09f8257217b
   docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md:
     id: 52078f097503
-    last_write_checksum: sha1:5d8fe21d292264209508ae484a7e88d33bff373f
+    last_write_checksum: sha1:fc134fdc7e229b8df373b77096c8299c214171a7
     pristine_git_object: 3dca3cd85245e0956b557fc5d6ae6c5e265df38d
+  docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md:
+    last_write_checksum: sha1:bbc08ca53c2da180b96ed0347cf4954410c79311
   docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md:
     id: b4e2b814d8c3
     last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e
@@ -1068,10 +968,6 @@ trackedFiles:
     id: 1c99619e2435
     last_write_checksum: sha1:cffbcfb8673e12feb8e22fd397bf68c8745c76bb
     pristine_git_object: f6c8a2c3079003a885ee9bdfc73cf7c7c7d8eded
-  docs/models/jobtype.md:
-    id: 86685dbc7863
-    last_write_checksum: sha1:da927d34a69b0b2569314cc7a62733ee1ab85186
-    pristine_git_object: 847c662259537ed54cc108e8de8d8eb93defbe58
   docs/models/jsonschema.md:
     id: a6b15ed6fac8
     last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f
@@ -1124,13 +1020,9 @@ trackedFiles:
     id: b9147b1c0e38
     last_write_checksum: sha1:45b2cc114886b300e3b996a8b71241ac5c7260a3
     pristine_git_object: 2f18b014af4577a0ae862dfeea599d5f700005cb
-  docs/models/librariesdocumentsuploadv1documentupload.md:
-    id: c76458963b1c
-    last_write_checksum: sha1:6973cb619a8e50bb12e96cffdc6b57fcf7add000
-    pristine_git_object: a0ba95da33a248fd639ca1af5f443fd043dae0ea
   docs/models/librariesdocumentsuploadv1request.md:
     id: 89a89d889c72
-    last_write_checksum: sha1:4f67f0bc5b2accb6dcf31ce7be0e9447ab4da811
+    last_write_checksum: sha1:32294a87d8a0b173b4d6f12b607a1bb3da765776
     pristine_git_object: 7c91ca9b92839be8ab1efb4428cc8d7a78d57e1e
   docs/models/librariesgetv1request.md:
     id: f47ad71ec7ca
@@ -1184,10 +1076,6 @@ trackedFiles:
     id: b071d5a509cc
     last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1
     pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7
-  docs/models/message.md:
-    id: a9614076792b
-    last_write_checksum: sha1:9199637b21212e630336d0d513c6b799732dee54
-    pristine_git_object: 752f04a8b5ec3bedb0b5c3e4fbf3e5c3fccc07cd
   docs/models/messageentries.md:
     id: 9af3a27b862b
     last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc
@@ -1198,12 +1086,14 @@ trackedFiles:
     pristine_git_object: 4fd18a0dcb4f6af4a9c3956116f8958dc2fa78d1
   docs/models/messageinputentry.md:
     id: eb74af2b9341
-    last_write_checksum: sha1:a65737ba7d9592ff91b42689c5c98fca8060d868
+    last_write_checksum: sha1:07124339ecb87e31df5f0e2f887e23209dd269af
     pristine_git_object: d55eb8769c3963518fcbc910d2e1398b6f46fd87
   docs/models/messageinputentrycontent.md:
     id: 7e12c6be6913
     last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a
     pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e
+  docs/models/messageinputentryobject.md:
+    last_write_checksum: sha1:7746753005fda37834a73e62bf459eacb740ba5b
   docs/models/messageinputentryrole.md:
     id: 2497d07a793d
     last_write_checksum: sha1:a41eb58f853f25489d8c00f7a9595f443dcca2e6
@@ -1238,7 +1128,7 @@ trackedFiles:
     pristine_git_object: cb4a7a1b15d44a465dbfbd7fe319b8dbc0b62406
   docs/models/messageoutputevent.md:
     id: b690693fa806
-    last_write_checksum: sha1:8a87ff6b624d133bcea36729fb1b1a1a88b3eaf0
+    last_write_checksum: sha1:d6538a4b5d5721c09bc196f3e9523ed45dafbea7
     pristine_git_object: 92c1c61587e34f6e143263e35c33acc9332870d6
   docs/models/messageoutputeventcontent.md:
     id: cecea075d823
@@ -1248,14 +1138,6 @@ trackedFiles:
     id: 87d07815e9be
     last_write_checksum: sha1:a6db79edc1bf2d7d0f4762653c8d7860cb86e300
     pristine_git_object: e38c6472e577e0f1686e22dc61d589fdb2928434
-  docs/models/messageoutputeventtype.md:
-    id: 13c082072934
-    last_write_checksum: sha1:03c07b7a6046e138b9b7c02084727785f05a5a67
-    pristine_git_object: 1f43fdcce5a8cfe4d781b4a6faa4a265975ae817
-  docs/models/messages.md:
-    id: 2103cd675c2f
-    last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244
-    pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a
   docs/models/metricout.md:
     id: 7c6ff0ad95f9
     last_write_checksum: sha1:eef34dc522a351e23d7371c00a07662a0711ea73
@@ -1270,24 +1152,20 @@ trackedFiles:
     pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca
   docs/models/modelconversation.md:
     id: 497521ee9bd6
-    last_write_checksum: sha1:bd11f51f1b6fedbf8a1e1973889d1961086c164f
+    last_write_checksum: sha1:440c9e7c306f20bd4f4b27ab0cf770d3bf8762e2
     pristine_git_object: 1a03ef7d1dd9e1d6b51f0f9391c46feb5cd822a8
   docs/models/modelconversationobject.md:
     id: 4c5699d157a9
     last_write_checksum: sha1:8e2e82e1fa4cb97f8c7a8a129b3cc9cd651e4055
     pristine_git_object: ead1fa26f5d9641a198a14b43a0f5689456e5821
-  docs/models/modelconversationtools.md:
-    id: b3463ae729a7
-    last_write_checksum: sha1:eb78650e337ab5354a0cdfbfcf975ed02495230b
-    pristine_git_object: 5cc97437c34263ad650c84c8702e158ee74ecfb1
+  docs/models/modelconversationtool.md:
+    last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f
   docs/models/modellist.md:
     id: ce07fd9ce413
-    last_write_checksum: sha1:4f2956eeba39cc14f2289f24990e85b3588c132a
+    last_write_checksum: sha1:b4c22b5eff4478ffa5717bd5af92ca79f4a90b01
     pristine_git_object: 760882c6c5b442b09bbc91f910f960138d6a00c8
-  docs/models/modeltype.md:
-    id: 9f69805691d1
-    last_write_checksum: sha1:f3a8bce458460e55124ce5dd6814e7cada8e0e89
-    pristine_git_object: a31c3ca0aa78cae9619b313f1cda95b9c391ee12
+  docs/models/modellistdata.md:
+    last_write_checksum: sha1:7394ba5645f990163c4d777ebbfc71f24c5d3a74
   docs/models/moderationobject.md:
     id: 4e84364835f5
     last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e
@@ -1296,14 +1174,8 @@ trackedFiles:
     id: e15cf12e553b
     last_write_checksum: sha1:18e8f4b4b97cb444824fcdce8f518c4e5a27c372
     pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20
-  docs/models/name.md:
-    id: 6ee802922293
-    last_write_checksum: sha1:91a266ed489c046a4ec511d4c03eb6e413c2ff02
-    pristine_git_object: 18b978a8cc2c38d65c37e7dd110315cedb221620
-  docs/models/object.md:
-    id: 7ffe67d0b83f
-    last_write_checksum: sha1:dfb590560db658dc5062e7cedc1f3f29c0d012a0
-    pristine_git_object: 0122c0db4541d95d57d2edb3f18b9e1921dc3099
+  docs/models/multipartbodyparams.md:
+    last_write_checksum: sha1:34e68e3795c7987138abd152177fa07198d2f6f6
   docs/models/ocrimageobject.md:
     id: b72f3c5853b2
     last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504
@@ -1332,18 +1204,12 @@ trackedFiles:
     id: 419abbb8353a
     last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e
     pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c
-  docs/models/one.md:
-    id: 69a5df93c480
-    last_write_checksum: sha1:cb6d46c2939a0e2314e29ff0307a2b0632caca65
-    pristine_git_object: 3de496a6201d47ea52fc15bfe16a44bd6d3be900
+  docs/models/output.md:
+    last_write_checksum: sha1:600058f0b0f589d8688e9589762c45a0dd18cc9b
   docs/models/outputcontentchunks.md:
     id: f7e175c8e002
     last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24
     pristine_git_object: c76bc31d4d8791b7bef4dc6cbff6671b38a7927d
-  docs/models/outputs.md:
-    id: 58b672ddb5b3
-    last_write_checksum: sha1:7553d62771ac5a85f8f330978b400cdd420cf865
-    pristine_git_object: 7756c6276cc141b69d8099e0bbcbd2bccc1b5112
   docs/models/paginationinfo.md:
     id: 3d2b61cbbf88
     last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0
@@ -1356,22 +1222,16 @@ trackedFiles:
     id: 83c8c59c1802
     last_write_checksum: sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c
     pristine_git_object: 7b67583f4209778ac6f945631c0ee03ba1f4c663
-  docs/models/queryparamagentversion.md:
-    id: 49d942f63049
-    last_write_checksum: sha1:42557c6bf0afc1eabde48c4b6122f801608d8f05
-    pristine_git_object: 3eb5ef1840299139bf969379cbfc3ed49127f176
-  docs/models/queryparamstatus.md:
-    id: 15628120923d
-    last_write_checksum: sha1:36f1c9b6a6af6f27fbf0190417abf95b4a0bc1b9
-    pristine_git_object: dcd2090861b16f72b0fb321714b4143bc14b7566
   docs/models/realtimetranscriptionerror.md:
     id: 4bc5e819565b
     last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6
     pristine_git_object: e01f2126b3084eade47a26ea092556f7f61142c9
   docs/models/realtimetranscriptionerrordetail.md:
     id: ea137b1051f1
-    last_write_checksum: sha1:43ae02b32b473d8ba1aaa3b336a40f706d6338d0
+    last_write_checksum: sha1:7e1d18760939d6087cda5fba54553141f8a78d1e
     pristine_git_object: 96420ada2ac94fca24a36ddacae9c876e14ccb7a
+  docs/models/realtimetranscriptionerrordetailmessage.md:
+    last_write_checksum: sha1:f8c3a4984d647d64e8ea4e1e42654265ffe46b0f
   docs/models/realtimetranscriptionsession.md:
     id: aeb0a0f87d6f
     last_write_checksum: sha1:c3aa4050d9cc1b73df8496760f1c723d16183f3a
@@ -1392,38 +1252,20 @@ trackedFiles:
     id: 0944b80ea9c8
     last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471
     pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257
-  docs/models/repositories.md:
-    id: 0531efe9bced
-    last_write_checksum: sha1:249bdb315eb1f0bd54601e5b8a45e58cb1ec7638
-    pristine_git_object: 02274e3d58d55f4a18dfdf578fa53d2459e1345e
   docs/models/requestsource.md:
     id: 8857ab6025c4
     last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf
     pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b
-  docs/models/response1.md:
-    id: 245c499462a9
-    last_write_checksum: sha1:6d64b50b59875744eb3c1038d7cdcba9397fdbae
-    pristine_git_object: 2e73fdbb204c14cadc028d0891ede0ca4d4178d7
-  docs/models/responsebody.md:
-    id: a2c4400c632e
-    last_write_checksum: sha1:a1705a40914ac8f96000953bd53ca01f66643fcd
-    pristine_git_object: 8a218517178eed859683f87f143c5397f96d10d9
+  docs/models/response.md:
+    last_write_checksum: sha1:f4a3ec06ff53cd1cbdf892ff7152d39fa1746821
   docs/models/responsedoneevent.md:
     id: 38c38c3c065b
-    last_write_checksum: sha1:9910c6c35ad7cb8e5ae0edabcdba8a8a498b3138
+    last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f
     pristine_git_object: ec25bd6d364b0b4959b11a6d1595bdb57cba6564
-  docs/models/responsedoneeventtype.md:
-    id: 03a896b6f98a
-    last_write_checksum: sha1:09ccbc7ed0143a884481a5943221be2e4a16c123
-    pristine_git_object: 58f7f44d74553f649bf1b54385926a5b5d6033f5
   docs/models/responseerrorevent.md:
     id: 3e868aa9958d
-    last_write_checksum: sha1:9ed1d04b3ed1f468f4dc9218890aa24e0c84fc03
+    last_write_checksum: sha1:4711077bf182e4f3406dd12357da49d37d172b4c
     pristine_git_object: 2ea6a2e0ec412ae484f60fa1d09d02e776499bb9
-  docs/models/responseerroreventtype.md:
-    id: 5595b8eec59e
-    last_write_checksum: sha1:442185b0615ec81923f4c97478e758b451c52439
-    pristine_git_object: 3b3fc303fc7f75c609b18a785f59517b222b6881
   docs/models/responseformat.md:
     id: 50a1e4140614
     last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add
@@ -1432,14 +1274,14 @@ trackedFiles:
     id: cf1f250b82db
     last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4
     pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f
+  docs/models/responseretrievemodelv1modelsmodelidget.md:
+    last_write_checksum: sha1:6bae62cbb18559065a53f0acdacb1f72f513467e
   docs/models/responsestartedevent.md:
     id: 88e3b9f0aa8d
-    last_write_checksum: sha1:fa9db583e8223d2d8284866f7e6cf6d775751478
+    last_write_checksum: sha1:156f38bbe8278f9c03117135938e7cbdae3038b9
     pristine_git_object: 481bd5bba67a524dbadf9f1570a28ae20ec9f642
-  docs/models/responsestartedeventtype.md:
-    id: 1d27fafe0f03
-    last_write_checksum: sha1:c30ca125ec76af9a2191ebc125f5f8b9558b0ecb
-    pristine_git_object: 2d9273bd02bf371378575619443ec948beec8d66
+  docs/models/responsev1conversationsget.md:
+    last_write_checksum: sha1:8e75db359f0d640a27498d20c2ea6d561c318d7e
   docs/models/retrievefileout.md:
     id: 8e82ae08d9b5
     last_write_checksum: sha1:600d5ea4f75dab07fb1139112962affcf633a6c9
@@ -1448,14 +1290,6 @@ trackedFiles:
     id: ac567924689c
     last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9
     pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a
-  docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md:
-    id: c2a914584353
-    last_write_checksum: sha1:bdd52e2c434fc6fd10e341d41de9dda1a28ddb4f
-    pristine_git_object: 3ac96521a8f58f1ed4caedbb4ab7fe3fe2b238c5
-  docs/models/role.md:
-    id: b694540a5b1e
-    last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d
-    pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053
   docs/models/sampletype.md:
     id: 0e09775cd9d3
     last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a
@@ -1488,17 +1322,9 @@ trackedFiles:
     id: 6a902241137c
     last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0
     pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3
-  docs/models/status.md:
-    id: 959cd204aadf
-    last_write_checksum: sha1:618f30fd5ba191bb918c953864bfac4a63192a40
-    pristine_git_object: 5e22eb736c734121b4b057812cacb43b3e299b52
-  docs/models/stop.md:
-    id: f231cc9f5041
-    last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8
-    pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846
   docs/models/systemmessage.md:
     id: fdb7963e1cdf
-    last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f
+    last_write_checksum: sha1:561c3372391e093c890f477b3213c308ead50b81
     pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13
   docs/models/systemmessagecontent.md:
     id: 94a56febaeda
@@ -1554,32 +1380,26 @@ trackedFiles:
     pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58
   docs/models/toolexecutiondeltaevent.md:
     id: f2fc876ef7c6
-    last_write_checksum: sha1:901756826684886179c21f47c063c55700c79ec4
+    last_write_checksum: sha1:ae1462a9b5cb56002b41f477ce262cb64ccf2f4e
     pristine_git_object: 7bee6d831a92085a88c0772300bcad4ce8194edb
   docs/models/toolexecutiondeltaeventname.md:
     id: 93fd3a3b669d
     last_write_checksum: sha1:d5dcdb165c220209ee76d81938f2d9808c77d4fc
     pristine_git_object: 9c3edef8c0698d7293a71ee56410a0ed67fd1924
-  docs/models/toolexecutiondeltaeventtype.md:
-    id: ae6e8a5bf0ce
-    last_write_checksum: sha1:dd405269077b6a4756fd086067c9bbe88f430924
-    pristine_git_object: a4a2f8cc9927499c990bad0590e84b2a609add8d
   docs/models/toolexecutiondoneevent.md:
     id: b604a4ca5876
-    last_write_checksum: sha1:267ff0e19884e08abf3818b890579c1a13a3fa98
+    last_write_checksum: sha1:6b6975ded0b0495b6c56250d153186c7818b5958
     pristine_git_object: 5898ea5eff103b99886789805d9113dfd8b01588
   docs/models/toolexecutiondoneeventname.md:
     id: d19dc0060655
     last_write_checksum: sha1:aa5677087e6933699135a53f664f5b86bbae5ac6
     pristine_git_object: 6449079d7b467796355e3353f4245046cced17e8
-  docs/models/toolexecutiondoneeventtype.md:
-    id: 7c5a318d924b
-    last_write_checksum: sha1:55a5041cdf8c7e05fcfd7260a72f7cd3f1b2baf8
-    pristine_git_object: 872624c1f274259cdd22100995b5d99bf27eaeac
   docs/models/toolexecutionentry.md:
     id: 75a7560ab96e
-    last_write_checksum: sha1:66086952d92940830a53f5583f1751b09d902fcf
+    last_write_checksum: sha1:fdaa9abd5417486100ffc7059fcfdc8532935ed3
     pristine_git_object: 3678116df64ad398fef00bab39dd35c3fd5ee1f5
+  docs/models/toolexecutionentryname.md:
+    last_write_checksum: sha1:6c528cdfbb3f2f7dc41d11f57c86676f689b8845
   docs/models/toolexecutionentryobject.md:
     id: af106f91001f
     last_write_checksum: sha1:6df075bee4e84edf9b57fcf62f27b22a4e7700f4
@@ -1590,16 +1410,12 @@ trackedFiles:
     pristine_git_object: a67629b8bdefe59d188969a2b78fa409ffeedb2a
   docs/models/toolexecutionstartedevent.md:
     id: 37657383654d
-    last_write_checksum: sha1:3051a74c1746c8341d50a22f34bd54f6347ee0c8
+    last_write_checksum: sha1:47126a25c2a93583038ff877b85fc9ae1dcef9f3
     pristine_git_object: de81312bda08970cded88d1b3df23ebc1481ebf2
   docs/models/toolexecutionstartedeventname.md:
     id: be6b33417678
     last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364
     pristine_git_object: 3308c483bab521f7fa987a62ebd0ad9cec562c3a
-  docs/models/toolexecutionstartedeventtype.md:
-    id: 9eff7a0d9ad5
-    last_write_checksum: sha1:86fe6aec11baff4090efd11d10e8b31772598349
-    pristine_git_object: 56695d1f804c28808cf92715140959b60eb9a9fd
   docs/models/toolfilechunk.md:
     id: 67347e2bef90
     last_write_checksum: sha1:0a499d354a4758cd8cf06b0035bca105ed29a01b
@@ -1614,16 +1430,12 @@ trackedFiles:
     pristine_git_object: 7e99acefff265f616b576a90a5f0484add92bffb
   docs/models/toolmessage.md:
     id: 0553747c37a1
-    last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167
+    last_write_checksum: sha1:f35fa287b94d2c1a9de46c2c479dadd5dca7144d
     pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5
   docs/models/toolmessagecontent.md:
     id: f0522d2d3c93
     last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee
     pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411
-  docs/models/toolmessagerole.md:
-    id: f333d4d1ab56
-    last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f
-    pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6
   docs/models/toolreferencechunk.md:
     id: 10414b39b7b3
     last_write_checksum: sha1:2e24f2331bb19de7d68d0e580b099c03f5207199
@@ -1636,10 +1448,6 @@ trackedFiles:
     id: 42a4cae4fd96
     last_write_checksum: sha1:43620d9529a1ccb2fac975fbe2e6fcaa62b5baa5
     pristine_git_object: bc57d277a39eef3c112c08ffc31a91f5c075c5a4
-  docs/models/tools.md:
-    id: b78ed2931856
-    last_write_checksum: sha1:ea4dcd2eafe87fc271c2f6f22f9b1cedc9f8316e
-    pristine_git_object: f308d732e3adfcc711590c3e1bee627c94032a6b
   docs/models/tooltypes.md:
     id: adb50fe63ea2
     last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c
@@ -1654,16 +1462,14 @@ trackedFiles:
     pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404
   docs/models/transcriptionsegmentchunk.md:
     id: f09db8b2273e
-    last_write_checksum: sha1:b89ee132a3c63e56806f3f395c98a9e7e5e9c7d0
+    last_write_checksum: sha1:5387f2595d14f34b8af6182c34efac4874a98308
    pristine_git_object: f620b96a75a0b9c6e015ae1f460dcccb80d113ee
+  docs/models/transcriptionsegmentchunktype.md:
+    last_write_checksum: sha1:63d511c2bd93bd477f1b7aae52954b28838316d9
   docs/models/transcriptionstreamdone.md:
     id: 2253923d93cf
-    last_write_checksum: sha1:043ebcd284007f8c8536f2726ec5f525abffeb6b
+    last_write_checksum: sha1:2a1910d59be258af8dd733b8911e5a0431fab5a4
     pristine_git_object: 9ecf7d9ca32410d92c93c62ead9674e097533ec3
-  docs/models/transcriptionstreamdonetype.md:
-    id: 3f5aec641135
-    last_write_checksum: sha1:b86f7b20dff031e7dbe02b4805058a025c39dcac
-    pristine_git_object: db092c4fa47d7401919a02c199198e4ae99a5de1
   docs/models/transcriptionstreamevents.md:
     id: d0f4eedfa2b6
     last_write_checksum: sha1:ec6b992049bd0337d57baab56603b1fa36a0a35b
@@ -1678,36 +1484,16 @@ trackedFiles:
     pristine_git_object: e4eb25a6400dcc5a48b5eb5f65e96f7be91fa761
   docs/models/transcriptionstreamlanguage.md:
     id: 5e9df200153c
-    last_write_checksum: sha1:82967c1b056bc1358adb21644bf78f0e37068e0f
+    last_write_checksum: sha1:d5626a53dde8d6736bab75f35cee4d6666a6b795
     pristine_git_object: e16c8fdce3f04ae688ddc18650b359d2dd5d6f6f
-  docs/models/transcriptionstreamlanguagetype.md:
-    id: 81c8bd31eeb1
-    last_write_checksum: sha1:6cf3efec178180266bccda24f27328edfbebbd93
-    pristine_git_object: e93521e10d43299676f44c8297608cc94c6106e6
   docs/models/transcriptionstreamsegmentdelta.md:
     id: f59c3fb696f2
-    last_write_checksum: sha1:4d03e881a4ad9c3bed6075bb8e25d00af391652c
+    last_write_checksum: sha1:4a031b76315f66c3d414a7dd5f34ae1b5c239b2e
     pristine_git_object: 2ab32f9783f6645bba7603279c03db4465c70fff
-  docs/models/transcriptionstreamsegmentdeltatype.md:
-    id: 03ee222a3afd
-    last_write_checksum: sha1:d02b5f92cf2d8182aeaa8dd3428b988ab4fc0fad
-    pristine_git_object: 03ff3e8bb4f25770200ed9fb43dd246375934c58
   docs/models/transcriptionstreamtextdelta.md:
     id: 69a13554b554
-    last_write_checksum: sha1:9f6c7bdc50484ff46b6715141cee9912f1f2f3ff
+    last_write_checksum: sha1:de31f5585d671f85e6a9b8f04938cf71000ae3f7
     pristine_git_object: adddfe187546c0161260cf06953efb197bf25693
-  docs/models/transcriptionstreamtextdeltatype.md:
-    id: ae14d97dc3fa
-    last_write_checksum: sha1:2abfea3b109518f7371ab78ade6fa514d6e3e968
-    pristine_git_object: b7c9d675402cd122ee61deaa4ea7051c2503cf0e
-  docs/models/two.md:
-    id: 3720b8efc931
-    last_write_checksum: sha1:8676158171bef1373b5e0b7c91a31c4dd6f9128a
-    pristine_git_object: 59dc2be2a2036cbdac26683e2afd83085387188f
-  docs/models/type.md:
-    id: 98c32f09b2c8
-    last_write_checksum: sha1:9b07c46f7e1aacaab319e8dfdcfdfc94a2b7bf31
-    pristine_git_object: d05ead75c8f6d38b4dbcc2cdad16f1ba4dd4f7e8
   docs/models/unarchiveftmodelout.md:
     id: 4f2a771b328a
     last_write_checksum: sha1:b3be8add91bbe10704ff674891f2e6377b34b539
@@ -1730,16 +1516,12 @@ trackedFiles:
     pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35
   docs/models/usermessage.md:
     id: ed66d7a0f80b
-    last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed
+    last_write_checksum: sha1:627f88dbb89e226a7d92564658c23a0e8d71342a
     pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546
   docs/models/usermessagecontent.md:
     id: 52c072c851e8
     last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a
     pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf
-  docs/models/usermessagerole.md:
-    id: 99ffa937c462
-    last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111
-    pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4
   docs/models/utils/retryconfig.md:
     id: 4343ac43161c
     last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d
@@ -1750,55 +1532,43 @@ trackedFiles:
     pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582
   docs/models/wandbintegration.md:
     id: ba1f7fe1b1a3
-    last_write_checksum: sha1:1702d58db559818304404a5dc8c70d71fb2be716
+    last_write_checksum: sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf
     pristine_git_object: 199d2eddc61069c80b628a12bff359ac2abc7338
   docs/models/wandbintegrationout.md:
     id: c1a0f85273d8
-    last_write_checksum: sha1:c2addbba8c15b7c115129d5249c4a6d7dc527d2d
+    last_write_checksum: sha1:ce7ffc6cc34931b4f6d2b051ff63e1ca39e13882
     pristine_git_object: cec02ed87555128e6027e00f3385a61028694ac0
-  docs/models/wandbintegrationouttype.md:
-    id: 647c7c2eab8a
-    last_write_checksum: sha1:78ad7847183b18319995b5e3de0262ba6fffecac
-    pristine_git_object: 5a7533c99671e0556c3c11f179312ec8268ce477
-  docs/models/wandbintegrationtype.md:
-    id: 08c414c73826
-    last_write_checksum: sha1:0990c604ec45f2f1fd1019e87705533b0c9be023
-    pristine_git_object: 4fdffe22e370fd64429d83753c30a0079be0e7fd
   docs/models/websearchpremiumtool.md:
     id: 267988aa8c3f
-    last_write_checksum: sha1:cc040d754d40c644a2a8fd70302eb7ee864bfff3
+    last_write_checksum: sha1:f9b761d727cbe0c60a2d0800b0a93929c5c3f5e7
     pristine_git_object: 941fc2b8448d4caeae9318fdf08053a2b59a9bee
-  docs/models/websearchpremiumtooltype.md:
-    id: c70fa6b0ee9f
-    last_write_checksum: sha1:069ad330c3f5b3c6b8a375de4484f151698c439c
-    pristine_git_object: 348bfe854914114c84cd74997a63fe2badc0756d
   docs/models/websearchtool.md:
     id: fc4df52fb9b5
-    last_write_checksum: sha1:53e128c3f0f6781227d99d46838579dc15ab26d2
+    last_write_checksum: sha1:047fd9f950d5a86cf42a8f3ac40f754b395e39ec
     pristine_git_object: c8d708bdcdbfc387a09683bdd47ebabedd566cb0
-  docs/models/websearchtooltype.md:
-    id: 6591e569c4f3
-    last_write_checksum: sha1:f9b6672bc3fbb5bb70c4919cb7b98160a0ebe9ff
-    pristine_git_object: 57b6acbbd3b85aae5a9b7e2f754689637c01a912
   docs/sdks/accesses/README.md:
     id: 2ea167c2eff2
     last_write_checksum: sha1:22bd7a11d44295c2f433955604d3578292f26c99
     pristine_git_object: 64a1e749aeb6f2c32497a72a649ecc2b7549c077
   docs/sdks/agents/README.md:
     id: 5965d8232fd8
-    last_write_checksum: sha1:34e01f46c1a32020fa3eeb40fe80c3c0e8de0983
+    last_write_checksum: sha1:a655952f426d5459fa958fa5551507e4fb3f29a8
     pristine_git_object: 75efc492c4114417c22a796824ee971e9180104e
+  docs/sdks/batchjobs/README.md:
+    last_write_checksum: sha1:212bc82280a58f896172d173e5be516b926bc11c
+  docs/sdks/betaagents/README.md:
+    last_write_checksum: sha1:131f220aefaff8a3ca912df661199be7a88d50ca
   docs/sdks/chat/README.md:
     id: 393193527c2c
-    last_write_checksum: sha1:7bc2201f585bea247c0bb148ecdea220bcb384e1
+    last_write_checksum: sha1:908e67969e8f17bbcbe3697de4233d9e1dd81a65
     pristine_git_object: 89c4fffbb777427723307b13c124668601ff5839
   docs/sdks/classifiers/README.md:
     id: 74eb09b8d620
-    last_write_checksum: sha1:f424721545e683e230ee0c612765be2bdb9897cd
+    last_write_checksum: sha1:f9cc75dbb32ea9780a9d7340e524b7f16dc18070
     pristine_git_object: 634ee419f3334ba50dd25f0e2340c32db1ec40b3
   docs/sdks/conversations/README.md:
     id: e22a9d2c5424
-    last_write_checksum: sha1:5ed03d60808cff2539e0e83df4714b3a274208a0
+    last_write_checksum: sha1:f55def6eaab9fcbed0e86a4dee60e5c2656f0805
     pristine_git_object: acd43cdb63edd23665e808aaccc6ab3a4dc3dc85
   docs/sdks/documents/README.md:
     id: 9758e88a0a9d
@@ -1816,25 +1586,15 @@ trackedFiles:
     id: 499b227bf6ca
     last_write_checksum: sha1:34ff7167b0597bf668ef75ede016cb8884372d1b
     pristine_git_object: 3c8c59c79db12c916577d6c064ddb16a511513fd
-  docs/sdks/jobs/README.md:
-    id: 7371cdc8b89a
-    last_write_checksum: sha1:5dcd708cfcbb00d0ab9d41311c363c6fdae101b0
-    pristine_git_object: 9c44be7559e2b7127d43ff50777fd32c7cf8b6ee
+  docs/sdks/finetuningjobs/README.md:
+    last_write_checksum: sha1:58b5ecea679eab1691f0002c7d3323170d73357b
   docs/sdks/libraries/README.md:
     id: df9a982905a3
     last_write_checksum: sha1:0c710c0395906333b85bedd516cfca7dcb3b9b42
     pristine_git_object: bbdacf0538c6c055fef0c0109aac163e987a3dd5
-  docs/sdks/mistralagents/README.md:
-    id: 20b3478ad16d
-    last_write_checksum: sha1:b2dcb1516dd05dc38e0e0305969de248994aade4
-    pristine_git_object: fe0f6e35a445e17ccedc2031c4b4204f5cc4d650
-  docs/sdks/mistraljobs/README.md:
-    id: 71aafa44d228
-    last_write_checksum: sha1:212bc82280a58f896172d173e5be516b926bc11c
-    pristine_git_object: 8f2358de28e88ffd1e3750292488c486f7bb893b
   docs/sdks/models/README.md:
     id: b35bdf4bc7ed
-    last_write_checksum: sha1:ca13e994ae31ddf37628eba9cc68cf8f64b48404
+    last_write_checksum: sha1:37ac4b52ddcdbe548d478aed5fd95091a38b4e42
     pristine_git_object: 6fa28ca2e25c0b2f3fbf044b706d19f01193fc3c
   docs/sdks/ocr/README.md:
     id: 545e35d2613e
@@ -1870,7 +1630,7 @@ trackedFiles:
     pristine_git_object: e7e1bb7f61527de6095357e4f2ab11e342a4af87
   src/mistralai/client/_version.py:
     id: cc807b30de19
-    last_write_checksum: sha1:e654adbd2f066332b48c68d97e995dcc8f7dde84
+    last_write_checksum: sha1:c808e81ad8b454d646101b878105d109d74ba6ad
     pristine_git_object: 8c5d6e54860c69881bf976887910fc32d183c6e5
   src/mistralai/client/accesses.py:
     id: 76fc53bfcf59
@@ -1878,7 +1638,7 @@ trackedFiles:
     pristine_git_object: 307c7156626e735c802c149ea3547648ea03da09
   src/mistralai/client/agents.py:
     id: e946546e3eaa
-    last_write_checksum: sha1:4a2bc22e5a6d9aee56d04d2800084eb326ef9ba7
+    last_write_checksum: sha1:0ff47f41f9224c1ef6c15b5793c04a7be64f074b
     pristine_git_object: c04abd21b5b7cb9b8ddfdb52ec67fffa7d21759a
   src/mistralai/client/audio.py:
     id: 7a8ed2e90d61
@@ -1890,27 +1650,31 @@ trackedFiles:
     pristine_git_object: bddc9012f28f7881b75a720a07a3ad60845e472e
   src/mistralai/client/batch.py:
     id: cffe114c7ac7
-    last_write_checksum: sha1:b7236249d2a6235fc3834b2c3bba3feda838013e
+    last_write_checksum: sha1:ed3cc7aee50879eca660845e51bb34912505d56a
     pristine_git_object: d53a45fbcbeb7b1d8fb29c373101c9e2a586b877
+  src/mistralai/client/batch_jobs.py:
+    last_write_checksum: sha1:0ac09a2fcbf9f059cea8197b0961cd78603e9c9c
   src/mistralai/client/beta.py:
     id: 981417f45147
-    last_write_checksum: sha1:2cf61e620e0e0e969e951d100e42c8c9b8facd27
+    last_write_checksum: sha1:538571fbb2b393c64b1e7f53d1e530d989717eb3
     pristine_git_object: b30003eae52be5e79838fe994cda8474068a43dc
+  src/mistralai/client/beta_agents.py:
+    last_write_checksum: sha1:295438e65ce0453cbb97988fb58d01263d88b635
   src/mistralai/client/chat.py:
     id: 7eba0f088d47
-    last_write_checksum: sha1:53558e4f3e5ecc8d2cea51d2462aa3432d8c156e
+    last_write_checksum: sha1:00d1ec46a2c964b39dae5f02e4d8adf23e5dcc21
     pristine_git_object: 6fa210bb01b193e1bd034431923a3d4dc8c8a16c
   src/mistralai/client/classifiers.py:
     id: 26e773725732
-    last_write_checksum: sha1:b3bed5a404f8837cc12e516f3fb85f47fd37518a
+    last_write_checksum: sha1:3a65b39ad26b6d1c988d1e08b7b06e88da21bb76
     pristine_git_object: 537e2438afcb570a3e436ab4dd8b7d604b35b627
   src/mistralai/client/conversations.py:
     id: 40692a878064
-    last_write_checksum: sha1:fedcc53385d833f18fdd393591cb156bc5e5f3d1
+    last_write_checksum: sha1:d6b44a85ecf623d0257296d62b05f26742a2a2aa
     pristine_git_object: 285beddbd175fee210b697d4714c28196c1fa7a2
   src/mistralai/client/documents.py:
     id: bcc17286c31c
-    last_write_checksum: sha1:82287ef513f2f5ee1acb9ffe8323f2dad0fc86f4
+    last_write_checksum: sha1:eb3d1d86cbc2e7e72176ff60370a9ad1d616e730
     pristine_git_object: 009a604f1c2fa367d14df7fb9f4078083c4be501
   src/mistralai/client/embeddings.py:
     id: f9c17258207e
@@ -1918,7 +1682,7 @@ trackedFiles:
     pristine_git_object: 359f2f621d1628536b89f66115726064db34e51b
   src/mistralai/client/files.py:
     id: f12df4b2ce43
-    last_write_checksum: sha1:72c1fda19adff9042461f498d5859bae62d4603a
+    last_write_checksum: sha1:577d731e40683b309a4848d8534185e738e54d31
     pristine_git_object: 97817eab1a4b0a0649a128b06a9f3ff4077dffa5
   src/mistralai/client/fim.py:
     id: 217bea5d701d
@@ -1926,35 +1690,25 @@ trackedFiles:
     pristine_git_object: 4a834fe93a9b9a8af30f681c9541a7cef0a513e1
   src/mistralai/client/fine_tuning.py:
     id: 5d5079bbd54e
-    last_write_checksum: sha1:e8061f6bb9912d668249c3c20235e9778345d23b
+    last_write_checksum: sha1:e420e8df4b265b95696085585b1b213b9d05dee4
     pristine_git_object: c57425fdf3225eaeccee47a17db198a3974995a3
+  src/mistralai/client/fine_tuning_jobs.py:
+    last_write_checksum: sha1:4dc213f6b47379bd76c97c8fc62a4dc23acbb86e
   src/mistralai/client/httpclient.py:
     id: 3e46bde74327
     last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7
     pristine_git_object: 89560b566073785535643e694c112bedbd3db13d
-  src/mistralai/client/jobs.py:
-    id: 22e6e695e52b
-    last_write_checksum: sha1:a040fec9c1a50ec603e2cd22284db526c177a55b
-    pristine_git_object: 848926eaca286f74b5cfd4b0f0f72a8e2222c52f
   src/mistralai/client/libraries.py:
     id: d43a5f78045f
     last_write_checksum: sha1:5264a24b973f49b4ea7252868f4a76baba9093b4
     pristine_git_object: 03a547410e042c19329ea9a91eef1bf25ecdcbe1
-  src/mistralai/client/mistral_agents.py:
-    id: bd22ff89d9bb
-    last_write_checksum: sha1:7b6d1ac9256c1f958bbc9cf18355b4407f0cffc4
-    pristine_git_object: 2ac7a29e4d7ab72c5fa29d13e7a8e4648906ead0
-  src/mistralai/client/mistral_jobs.py:
-    id: e925bb9b27ce
-    last_write_checksum: sha1:b1d8ecfe998d64637089eb4a5a4cfdf4735717d1
-    pristine_git_object: eae4403326ecfdf432a1ca7feb260ffe8ec251cf
   src/mistralai/client/models/__init__.py:
     id: e0e8dad92725
-    last_write_checksum: sha1:cb1fb02e33b85bf82db7d6fd15b2cc3b109c5060
+    last_write_checksum: sha1:1b4b7b007a50570b4592f6121d6fa5556cecae4b
     pristine_git_object: 23e652220f29a882748661a8c0d21aa2830471bf
   src/mistralai/client/models/agent.py:
     id: 1336849c84fb
-    last_write_checksum: sha1:68609569847b9d638d948deba9563d5460c17b9f
+    last_write_checksum: sha1:39fca92a9cb4fea59a01b6ce883b1c17395978f8
     pristine_git_object: 3bedb3a3a71c116f5ccb0294bc9f3ce6690e47b2
   src/mistralai/client/models/agentaliasresponse.py:
     id: 3899a98a55dd
@@ -1966,11 +1720,11 @@ trackedFiles:
     pristine_git_object: 5dfa8c3137c59be90c655ba8cf8afb8a3966c93a
   src/mistralai/client/models/agentcreationrequest.py:
     id: 35b7f4933b3e
-    last_write_checksum: sha1:60caa3dfa2425ac3ff4e64d81ac9d18df0774157
+    last_write_checksum: sha1:99456f8e6d8848f2cebbd96040eefbce73c9c316
     pristine_git_object: 61a5aff554f830ab9057ce9ceafc2ce78380290f
   src/mistralai/client/models/agenthandoffdoneevent.py:
     id: 82628bb5fcea
-    last_write_checksum: sha1:79de1153a3fce681ee547cc1d3bd0fd8fc5598d2
+    last_write_checksum: sha1:151a49e8a7f110123fd0a41e723dfdb6055e9a8e
     pristine_git_object: c826aa5e1f2324cddb740b3ffc05095ff26c666d
   src/mistralai/client/models/agenthandoffentry.py:
     id: 5030bcaa3a07
@@ -1978,7 +1732,7 @@ trackedFiles:
     pristine_git_object: 0b0de13f8840e9ab221ea233040ca03241cba8b7
   src/mistralai/client/models/agenthandoffstartedevent.py:
     id: 2f6093d9b222
-    last_write_checksum: sha1:772bc7b396285560cdafd7d7fb4bc4ece79179ad
+    last_write_checksum: sha1:ba4e40a4791bad20a4ac7568e32e34f6f00cfe24
     pristine_git_object: 4b8ff1e5e3639fb94b55c0a417e9478d5a4252b2
   src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py:
     id: 23a832f8f175
@@ -1994,7 +1748,7 @@ trackedFiles:
     pristine_git_object: edcccda19d5c3e784a227c6356285ee48be3d7f2
   src/mistralai/client/models/agents_api_v1_agents_getop.py:
     id: f5918c34f1c7
-    last_write_checksum: sha1:412df95a1ac4b4f6a59e4391fd1226f2e26e4537
+    last_write_checksum: sha1:efdd7bed8ae19047b48c16c73099d433725181ab
     pristine_git_object: d4817457a33d49ddaa09e8d41f3b03b69e8e491e
   src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py:
     id: a04815e6c798
@@ -2030,7 +1784,7 @@ trackedFiles:
     pristine_git_object: 81066f90302d79bc2083d1e31aa13656c27cc65f
   src/mistralai/client/models/agents_api_v1_conversations_getop.py:
     id: c530f2fc64d0
-    last_write_checksum: sha1:28cab443af4d623a22e836ab876da20d84eb8a41
+    last_write_checksum: sha1:89088ac683d6830ffd4f649c25ccfb60a4b094de
     pristine_git_object: c919f99e38148fb9b2d51816d0dd231ee828b11d
   src/mistralai/client/models/agents_api_v1_conversations_historyop.py:
     id: 2f5ca33768aa
@@ -2038,7 +1792,7 @@ trackedFiles:
     pristine_git_object: ba1f8890c1083947e4d6882dff2b50b3987be738
   src/mistralai/client/models/agents_api_v1_conversations_listop.py:
     id: 936e36181d36
-    last_write_checksum: sha1:b338f793707c25ce9703266d8b7f6f560051b057
+    last_write_checksum: sha1:e528bf06983dd0b22a0b0bc1d470b344e85db434
     pristine_git_object: bb3c7127c4b43019405689dc2ae10f5933c763bc
   src/mistralai/client/models/agents_api_v1_conversations_messagesop.py:
     id: b5141764a708
@@ -2054,15 +1808,15 @@ trackedFiles:
     pristine_git_object: 8bce3ce519a69a6d1cb36383b22fb801768c4868
   src/mistralai/client/models/agentscompletionrequest.py:
     id: 3960bc4c545f
-    last_write_checksum: sha1:7f2176c96916c85ac43278f3ac23fe5e3da35aca
+    last_write_checksum: sha1:d22d3513e2b391127df2202ca50b1fb9de605103
     pristine_git_object: 22368e44adb1b3ecff58d2b92592710335a062b9
   src/mistralai/client/models/agentscompletionstreamrequest.py:
     id: 1b73f90befc2
-    last_write_checksum: sha1:8126924507b41754ec1d4a10613cf189f5ea0aea
+    last_write_checksum: sha1:02fd1cf62fc203635099ad60fb9b41e82a82e0f8
     pristine_git_object: 37d46c79d8964d799679413e14122a5146799eb6
   src/mistralai/client/models/agentupdaterequest.py:
     id: 2d5a3a437819
-    last_write_checksum: sha1:97509eeb4cd25d31a0e1f3b4de1288580cb9a5cb
+    last_write_checksum: sha1:65fdf42d54199ad3b951089bdea26deca0134440
     pristine_git_object: 261ac069ce4e2b630d39080edf47bf2ad510ffb4
   src/mistralai/client/models/apiendpoint.py:
     id: 00b34ce0a24d
@@ -2078,7 +1832,7 @@ trackedFiles:
     pristine_git_object: 3ba14ce78e01c92458477bb025b9e5ded074fd4d
   src/mistralai/client/models/audiochunk.py:
     id: ce5dce4dced2
-    last_write_checksum: sha1:6d8ed87fd3f114b2b04aa15dd24d0dd5b1837215
+    last_write_checksum: sha1:5b7ef3c96f0d8b240d1a7354379dbebd911604c3
     pristine_git_object: 80d836f27ae65f30c6ca0e1d4d5d585bbf498cfd
   src/mistralai/client/models/audioencoding.py:
     id: b14e6a50f730
@@ -2098,7 +1852,7 @@ trackedFiles:
     pristine_git_object: 350643614e23002bc55e99e2d1807bedd80a0613
   src/mistralai/client/models/basemodelcard.py:
     id: 556ebdc33276
-    last_write_checksum: sha1:f524e61a160af83b20f7901afc585f61bfad6e05
+    last_write_checksum: sha1:6ebd9dd362ad23d34eb35451af01897662854726
     pristine_git_object: 8ce7f139b6018c4a7358a21534532cd3e741fa8a
   src/mistralai/client/models/batcherror.py:
     id: 1563e2a576ec
@@ -2118,7 +1872,7 @@ trackedFiles:
     pristine_git_object: 2654dac04c126a933f6d045f43f16a30263750dc
   src/mistralai/client/models/batchjobstatus.py:
     id: 61e08cf5eea9
-    last_write_checksum: sha1:9e042ccd0901fe4fc08fcc8abe5a3f3e1ffe9cbb
+    last_write_checksum: sha1:f90059b4aaead197100965c648114254e7dc4888
     pristine_git_object: 4b28059ba71b394d91f32dba3ba538a73c9af7a5
   src/mistralai/client/models/batchrequest.py:
     id: 6f36819eeb46
@@ -2126,7 +1880,7 @@ trackedFiles:
     pristine_git_object: 24f50a9af9a74f6bec7e8903a966d114966a36d3
   src/mistralai/client/models/builtinconnectors.py:
     id: 2d276ce938dc
-    last_write_checksum: sha1:4e94744e3854d4cdc9d1272e4f1d9371f9829a5f
+    last_write_checksum: sha1:50d2b60942ca1d7c9868ce59bf01ed860c09f313
     pristine_git_object: 6a3b2476d54096722eb3e7a271629d108028bd35
   src/mistralai/client/models/chatclassificationrequest.py:
     id: afd9cdc71834
@@ -2134,11 +1888,11 @@ trackedFiles:
     pristine_git_object: 450810225bb43bbd1539768e291840a210489f0f
   src/mistralai/client/models/chatcompletionchoice.py:
     id: 7e6a512f6a04
-    last_write_checksum: sha1:bc3fb866e2eb661b1619f118af459d18ba545d40
+    last_write_checksum: sha1:dee3be3b6950e355b14cce5be6c34bd5d03ba325
     pristine_git_object: 5d888cfd73b82097d647f2f5ecdbdf8beee2e098
   src/mistralai/client/models/chatcompletionrequest.py:
     id: 9979805d8c38
-    last_write_checksum: sha1:ccd9f3908c71d6fc3ad57f41301348918b977a6f
+    last_write_checksum: sha1:6442737fd5552e01ad78ab4cf8bc10e0d9c75d05
     pristine_git_object: 30fce28d5e071797a7180753f2825d39cfeac362
   src/mistralai/client/models/chatcompletionresponse.py:
     id: 669d996b8e82
@@ -2146,11 +1900,11 @@ trackedFiles:
     pristine_git_object:
60a1f561ff29c3bc28ee6aea69b60b9d47c51471 src/mistralai/client/models/chatcompletionstreamrequest.py: id: 18cb2b2415d4 - last_write_checksum: sha1:a067cc25d2e8c5feb146bdb0b69fb5186e77c416 + last_write_checksum: sha1:512f4c05b140757888db465e2bb30a0abcafb1d4 pristine_git_object: 21dad38bb83e9b334850645ffa24e1099b121f6c src/mistralai/client/models/chatmoderationrequest.py: id: 057aecb07275 - last_write_checksum: sha1:f93d1758dd8c0f123d8c52d162e3b4c8681bf121 + last_write_checksum: sha1:6c24f39ddd835278773bd72cb2676e8f1fd10e73 pristine_git_object: 631c914d1a4f4453024665eb0a8233ec7a070332 src/mistralai/client/models/checkpointout.py: id: 3866fe32cd7c @@ -2170,15 +1924,15 @@ trackedFiles: pristine_git_object: 89a137c374efc0f8b3ee49f3434f264705f69639 src/mistralai/client/models/classifierdetailedjobout.py: id: d8daeb39ef9f - last_write_checksum: sha1:d33e6a4672b33b6092caec50cc957d98e32058f7 + last_write_checksum: sha1:7e6df794c49d75785fac3bf01ea467a2dcbd224b pristine_git_object: 1de4534fcb12440a004e94bc0eced7483952581d src/mistralai/client/models/classifierftmodelout.py: id: 2903a7123b06 - last_write_checksum: sha1:4662ec585ade8347aeda4f020b7d31978bf8f9bb + last_write_checksum: sha1:78bfdfa3b9188c44fe4cd9cf18bce9e1d1a4cd48 pristine_git_object: a4572108674ea9c209b6224597878d5e824af686 src/mistralai/client/models/classifierjobout.py: id: e19e9c4416cc - last_write_checksum: sha1:0239761cb318518641281f584783bd2b42ec3340 + last_write_checksum: sha1:7384ea39ff4c341e8d84c3a4af664298b31c1440 pristine_git_object: ab1e261d573a30714042af3f20ed439ddbf1d819 src/mistralai/client/models/classifiertargetin.py: id: ed021de1c06c @@ -2198,7 +1952,7 @@ trackedFiles: pristine_git_object: e24c9ddecf60c38e146b8f94ad35be95b3ea2609 src/mistralai/client/models/codeinterpretertool.py: id: 950cd8f4ad49 - last_write_checksum: sha1:533ae809df90e14e4ef6e4e993e20e37f969f39f + last_write_checksum: sha1:9b720eaf4d7243e503e14350f457babbca9cf7af pristine_git_object: faf5b0b78f2d9981bb02eee0c28bba1fdba795b9 src/mistralai/client/models/completionargs.py: id: 3db008bcddca @@ -2214,7 +1968,7 @@ trackedFiles: pristine_git_object: 9790db6fe35e0043f3240c0f7e8172d36dee96f5 src/mistralai/client/models/completiondetailedjobout.py: id: 9bc38dcfbddf - last_write_checksum: sha1:df43d27716d99b6886a2b2a389e4c7b8c0b61630 + last_write_checksum: sha1:0b0f7114471e650b877de2e149b69e772d29905f pristine_git_object: 85c0c803cf809338900b7b8dcde774d731b67f8f src/mistralai/client/models/completionevent.py: id: c68817e7e190 @@ -2222,11 +1976,11 @@ trackedFiles: pristine_git_object: 52db911eeb62ec7906b396d6936e3c7a0908bb76 src/mistralai/client/models/completionftmodelout.py: id: 0f5277833b3e - last_write_checksum: sha1:d125468e84529042a19e29d1c34aef70318ddf54 + last_write_checksum: sha1:6ae50b3172f358796cfeb154c7e59f9cdde39e61 pristine_git_object: ccecbb6a59f2994051708e66bce7ece3598a786f src/mistralai/client/models/completionjobout.py: id: 712e6c524f9a - last_write_checksum: sha1:4ca927d2eb17e2f2fe588fd22f6aaa32a4025b07 + last_write_checksum: sha1:4f66641e3d765df1db88554b4399eded4625e08d pristine_git_object: ecd95bb9c93412b222659e6f369d3ff7e13c8bb2 src/mistralai/client/models/completionresponsestreamchoice.py: id: 5969a6bc07f3 @@ -2254,11 +2008,11 @@ trackedFiles: pristine_git_object: f51407bf2a363f705b0b61ed7be4ef6249525af5 src/mistralai/client/models/conversationevents.py: id: 8c8b08d853f6 - last_write_checksum: sha1:e0d920578ca14fa186b3efeee69ed03f7a2aa119 + last_write_checksum: sha1:4d7e8087fa9a074ed2747131c3753e723ba03e0b pristine_git_object: 
308588a1f094631935e4229f5538c5092f435d2c src/mistralai/client/models/conversationhistory.py: id: 60a51ff1682b - last_write_checksum: sha1:ed60e311224c3ada9c3768335394a5b338342433 + last_write_checksum: sha1:637f7302571f51bcb5d65c51e6b6e377e8895b96 pristine_git_object: 40bd1e7220160f54b0ab938b3627c77fb4d4f9ef src/mistralai/client/models/conversationinputs.py: id: 711b769f2c40 @@ -2270,11 +2024,11 @@ trackedFiles: pristine_git_object: 1ea05369b95fdaa7d7ae75398669f88826e5bb26 src/mistralai/client/models/conversationrequest.py: id: 58e3ae67f149 - last_write_checksum: sha1:20339231abbf60fb160f2dc24941860304c702fd + last_write_checksum: sha1:0e3cdc7cb34cc8c7f646cc7c2869349747cfd47e pristine_git_object: e3211c4c7b20c162473e619fad6dc0c6cea6b571 src/mistralai/client/models/conversationresponse.py: id: ad7a8472c7bf - last_write_checksum: sha1:50fdea156c2f2ce3116d41034094c071a3e136fa + last_write_checksum: sha1:ae6b273f3b1d1aff149d269a19c99d495fdf263e pristine_git_object: 32d0f28f101f51a3ca79e4d57f4913b1c420b189 src/mistralai/client/models/conversationrestartrequest.py: id: 681d90d50514 @@ -2286,7 +2040,7 @@ trackedFiles: pristine_git_object: 689815ebcfe577a1698938c9ccbf100b5d7995f8 src/mistralai/client/models/conversationstreamrequest.py: id: 58d633507527 - last_write_checksum: sha1:9cb79120c78867e12825ac4d504aa55ee5827168 + last_write_checksum: sha1:d4cda0957f6d09ed991e3570b6e8ef81d3cf62af pristine_git_object: 219230a2a8dd7d42cc7f5613ca22cec5fa872750 src/mistralai/client/models/conversationusageinfo.py: id: 6685e3b50b50 @@ -2306,11 +2060,11 @@ trackedFiles: pristine_git_object: 5aa8b68fe3680d3b51127d6a6b6068b1303756e8 src/mistralai/client/models/deltamessage.py: id: 68f53d67a140 - last_write_checksum: sha1:52296fa6d7fc3788b64dcb47aadd0818bcb86e11 + last_write_checksum: sha1:db65faf32a4abc2396eb1f694d3245fcc4173e2f pristine_git_object: 0ae56da86f645e5a0db2a0aa4579342610243300 src/mistralai/client/models/documentlibrarytool.py: id: 3eb3c218f457 - last_write_checksum: sha1:af01ec63a1c5eb7c332b82b3ec1d3553891614c2 + last_write_checksum: sha1:3f3dafea3df855f1fccaa6ece64df55b40b2d4f7 pristine_git_object: 861a58d38125ca5af11772ebde39a7c57c39ad9c src/mistralai/client/models/documentout.py: id: 7a85b9dca506 @@ -2390,7 +2144,7 @@ trackedFiles: pristine_git_object: 4a9678e5aa7405cbe09f59ffbdb6c7927396f06a src/mistralai/client/models/files_api_routes_upload_fileop.py: id: f13b84de6fa7 - last_write_checksum: sha1:3dc679de7b41abb4b0710ade631e818621b6f3bc + last_write_checksum: sha1:2ca94437630dddc55c6dd624d715963b19b97a73 pristine_git_object: 723c6cc264613b3670ac999829e66131b8424849 src/mistralai/client/models/fileschema.py: id: 19cde41ca32a @@ -2418,7 +2172,7 @@ trackedFiles: pristine_git_object: f5b8b2ed45b56d25b387da44c398ae79f3a52c73 src/mistralai/client/models/ftclassifierlossfunction.py: id: d21e2a36ab1f - last_write_checksum: sha1:69e08ab728e095b8e3846ed2dc142aa1e82a864a + last_write_checksum: sha1:9554b17b3139b54975aae989fb27e1c369bee4cd pristine_git_object: c4ef66e0fe69edace4912f2708f69a6e606c0654 src/mistralai/client/models/ftmodelcapabilitiesout.py: id: f70517be97d4 @@ -2426,7 +2180,7 @@ trackedFiles: pristine_git_object: be31aa3c14fb8fe9154ad8f54e9bf43f586951c7 src/mistralai/client/models/ftmodelcard.py: id: c4f15eed2ca2 - last_write_checksum: sha1:a6a71ce4a89688cb4780697e299a4274f7323e24 + last_write_checksum: sha1:ab559da7dd290e4d2be5c6a3398732de887b2a74 pristine_git_object: 36cb723df8bcde355e19a55105932298a8e2e33a src/mistralai/client/models/function.py: id: 32275a9d8fee @@ -2446,7 +2200,7 @@ 
trackedFiles: pristine_git_object: ac9e6227647b28bfd135c35bd32ca792d8dd414b src/mistralai/client/models/functioncallevent.py: id: 23b120b8f122 - last_write_checksum: sha1:535874a4593ce1f40f9683fa85159e4c4274f3ee + last_write_checksum: sha1:c0226ca734320b628223f5c5206477b224dff15e pristine_git_object: 4e040585285985cebc7e26ac402b6df8f4c063bb src/mistralai/client/models/functionname.py: id: 000acafdb0c0 @@ -2458,15 +2212,15 @@ trackedFiles: pristine_git_object: a843bf9bdd82b5cf3907e2172ed793a391c5cba2 src/mistralai/client/models/functiontool.py: id: 2e9ef5800117 - last_write_checksum: sha1:8ab806567a2ab6c2e04cb4ce394cbff2ae7aad50 + last_write_checksum: sha1:af5e38a4498149f46abd63eda97f9ccfb66a1fa3 pristine_git_object: 74b50d1bcd2bc0af658bf5293c8cc7f328644fa1 src/mistralai/client/models/githubrepositoryin.py: id: eef26fbd2876 - last_write_checksum: sha1:3b64fb4f34e748ef71fa92241ecdd1c73aa9485a + last_write_checksum: sha1:7736d0a475b47049c35aec59254c5d47b3ae609b pristine_git_object: e56fef9ba187792238991cc9373a7d2ccf0b8c0d src/mistralai/client/models/githubrepositoryout.py: id: d2434a167623 - last_write_checksum: sha1:d2be5c474d3a789491cad50b95e3f25933b0c66a + last_write_checksum: sha1:5d9625805bf6eb3c061ebdd73433ca2001e26cb1 pristine_git_object: e3aa9ebc52e8613b15e3ff92a03593e2169dc935 src/mistralai/client/models/httpvalidationerror.py: id: 4099f568a6f8 @@ -2474,7 +2228,7 @@ trackedFiles: pristine_git_object: 34d9b54307db818e51118bc448032e0476688a35 src/mistralai/client/models/imagegenerationtool.py: id: e1532275faa0 - last_write_checksum: sha1:7eaea320c1b602df2e761405644361820ca57d33 + last_write_checksum: sha1:e5d4c986062850ce3ba4f66a8347848332192c21 pristine_git_object: e09dba81314da940b2be64164e9b02d51e72f7b4 src/mistralai/client/models/imageurl.py: id: e4bbf5881fbf @@ -2482,7 +2236,7 @@ trackedFiles: pristine_git_object: 6e61d1ae2ec745774345c36e605748cf7733687b src/mistralai/client/models/imageurlchunk.py: id: 746fde62f637 - last_write_checksum: sha1:2311445f8c12347eab646f1b9ff7c4202642c907 + last_write_checksum: sha1:f6c19195337e3715fac3dc874abfc2333d661c8e pristine_git_object: f967a3c8ced6d5fb4b274454100134e41c5b7a5c src/mistralai/client/models/inputentries.py: id: 44727997dacb @@ -2490,15 +2244,15 @@ trackedFiles: pristine_git_object: 8ae29837a6c090fbe1998562684d3a372a9bdc31 src/mistralai/client/models/inputs.py: id: 84a8007518c7 - last_write_checksum: sha1:3ecd986b0f5a0de3a4c88f06758cfa51068253e9 + last_write_checksum: sha1:62cf4c19b48f68f57f30223d48d06e33d08ae096 pristine_git_object: fb0674760c1191f04e07f066e84ae9684a1431e3 src/mistralai/client/models/instructrequest.py: id: 6d3ad9f896c7 - last_write_checksum: sha1:5f8857f8fffe0b858cfc7bec268480003b562303 + last_write_checksum: sha1:5fabc65cccf9f17ffbd20cd176341b4d78b62a5c pristine_git_object: 1b2f269359700582687fdf4492ea3cef64da48bb src/mistralai/client/models/jobin.py: id: f4d176123ccc - last_write_checksum: sha1:c1ec4b9ea0930612aea1b1c5c5cd419379ab0687 + last_write_checksum: sha1:ae6b1d9bc202db7a49d29f85b75bffea605126c5 pristine_git_object: dc7684fcbecd558fc6e3e3f17c4000ec217285c1 src/mistralai/client/models/jobmetadataout.py: id: 805f41e3292a @@ -2522,23 +2276,23 @@ trackedFiles: pristine_git_object: 4536b738442ec9710ddf67f2faf7d30b094d8cd5 src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: id: d175c6e32ecb - last_write_checksum: sha1:07bfc80146492e3608a5c1683e4530de296c0938 + last_write_checksum: sha1:c61f02640ec384778e6f6b1f08dcb31dc5c1fb82 pristine_git_object: 
b36d3c3ef5abb30abc886876bb66384ea41bab9e src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: id: 81651291187a - last_write_checksum: sha1:eb265e749cc076b2d39c103df48ceeeda6da7f5a + last_write_checksum: sha1:80bc2d32588a115c4ac5571a3c1ffc8a24ab9d45 pristine_git_object: ece0d15a0654ec759904276ad5d95c5619ff016f src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: id: d910fd8fe2d6 - last_write_checksum: sha1:7ee82991b49a615517b3323abbfc0e5928419890 + last_write_checksum: sha1:4f57772cda3075251f36c52a264ebce1328cb486 pristine_git_object: aa5a26098e084885e8c2f63944e7549969899d3c src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: id: cf43028824bf - last_write_checksum: sha1:3fd6b5c7c9ae24d662abd5d3c7ea9699e295e5ff + last_write_checksum: sha1:e7bb3855dabfcaf7b92e6917e6da39246fc01282 pristine_git_object: 7e399b31354e4f09c43efbe9ffe3d938f6af0d8c src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: id: e7ff4a4a4edb - last_write_checksum: sha1:176fef64d07c58da36ca6672ce5440508787dc84 + last_write_checksum: sha1:21d90c0a3fa151bd855d63ed241f518812f26f82 pristine_git_object: ed5938b039be719169e62e033b7735bde7e72503 src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: id: 7cc1c80335a9 @@ -2546,11 +2300,11 @@ trackedFiles: pristine_git_object: e1be0ac00af889a38647b5f7e4f9d26ed09ee7c4 src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: id: 6d9dc624aafd - last_write_checksum: sha1:1a8054c02cd8fd3c48954812e153e97efa58aaef + last_write_checksum: sha1:ad615dd8d493fec4f818f19e5745ff52575181aa pristine_git_object: a2b70b37e349c7f5fc6c687fbad015eb218de952 src/mistralai/client/models/jobsout.py: id: 22e91e9631a9 - last_write_checksum: sha1:f2a5aa117953410f0743c2dd024e4a462a0be105 + last_write_checksum: sha1:1bb48570e040fa9ad4408b41fef8ce4ec0bf52be pristine_git_object: 9087704f0660e39f662efbd36f39713202598c43 src/mistralai/client/models/jsonschema.py: id: e1fc1d8a434a @@ -2602,7 +2356,7 @@ trackedFiles: pristine_git_object: f677b4ddc96b51ecd777240844800b2634ca4358 src/mistralai/client/models/libraries_documents_upload_v1op.py: id: "744466971862" - last_write_checksum: sha1:63b6f82a3ed8b0655d3b5dea1811699553d62cb0 + last_write_checksum: sha1:d6b085e01eac97f404a01e137413e159390c1382 pristine_git_object: e2d59d9f1556ca77c0666b2bba3213ef5386f82a src/mistralai/client/models/libraries_get_v1op.py: id: d493f39e7ebb @@ -2662,7 +2416,7 @@ trackedFiles: pristine_git_object: e90d8aa0317e553bfc0cceb4a356cf9994ecfb60 src/mistralai/client/models/messageinputentry.py: id: c0a4b5179095 - last_write_checksum: sha1:def6a5ce05756f76f7da6504bfc25eea166b21ab + last_write_checksum: sha1:e9898424d5129750738adb6a049232162824282d pristine_git_object: 12a31097a88e90645c67a30451a379427cd4fcd3 src/mistralai/client/models/messageoutputcontentchunks.py: id: 2ed248515035 @@ -2674,7 +2428,7 @@ trackedFiles: pristine_git_object: d52e4e3e722ef221f565a0bd40f505385974a0e1 src/mistralai/client/models/messageoutputevent.py: id: a2bbf63615c6 - last_write_checksum: sha1:19dda725e29108b2110903e7883ce442e4e90bd4 + last_write_checksum: sha1:fb98c35064fd9c65fa8c8c0cbc59293067ac793f pristine_git_object: 3db7f5a0908a72f75f6f7303af4ad426a4909d84 src/mistralai/client/models/metricout.py: id: 92d33621dda7 @@ -2694,11 +2448,11 @@ trackedFiles: pristine_git_object: a6db80e73189addcb1e1951a093297e0523f5fa4 src/mistralai/client/models/modelconversation.py: id: fea0a651f888 - 
last_write_checksum: sha1:35fec41b1dac4a83bdf229de5dd0436916b144c8 + last_write_checksum: sha1:beade63589bde3cae79f471a71e3d04d3f132f97 pristine_git_object: 574f053d4186288980754ead28bb6ce19b414064 src/mistralai/client/models/modellist.py: id: 00693c7eec60 - last_write_checksum: sha1:4b9cdd48439f0ebc1aa6637cc93f445fc3e8a424 + last_write_checksum: sha1:d6ff956092c0c930a6db02cbe017bc473403639c pristine_git_object: 6a5209fa6dac59539be338e9ac6ffbefd18057ee src/mistralai/client/models/moderationobject.py: id: 132faad0549a @@ -2734,7 +2488,7 @@ trackedFiles: pristine_git_object: 2813a1ca4c94d690f248a318a9e35d655d80600c src/mistralai/client/models/ocrtableobject.py: id: d74dd0d2ddac - last_write_checksum: sha1:6821e39003e2ca46dc31384c2635e59763fddb98 + last_write_checksum: sha1:d562f3207193c7d5ef5d7b6175eba8006b6c3a73 pristine_git_object: 0c9091de8975d8bd8e673aadbb69a619b96d77e8 src/mistralai/client/models/ocrusageinfo.py: id: 272b7e1785d5 @@ -2762,7 +2516,7 @@ trackedFiles: pristine_git_object: e6a889de576f9e36db551a44d4ed3cf0c032e599 src/mistralai/client/models/realtimetranscriptionerrordetail.py: id: 5bd25cdf9c7a - last_write_checksum: sha1:49ff15eb41e8964ba3b150e2fca70f6529dee58f + last_write_checksum: sha1:471824f03586b63688de43608d6c756b8a156e11 pristine_git_object: 27bb8d872792723b06238b3f0eebed815948fd63 src/mistralai/client/models/realtimetranscriptionsession.py: id: 02517fa5411a @@ -2786,11 +2540,11 @@ trackedFiles: pristine_git_object: 7b0a35c44050b6fca868479e261805a77f33e230 src/mistralai/client/models/responsedoneevent.py: id: cf8a686bf82c - last_write_checksum: sha1:1fa63522f52a48a8e328dc5b3fe2c6f5206b04cc + last_write_checksum: sha1:25972ca80ff7fd7a0d6dfe98718be52580dacc61 pristine_git_object: 5405625692cb22c60a7b5f5a6f1b58cee5676576 src/mistralai/client/models/responseerrorevent.py: id: b286d74e8724 - last_write_checksum: sha1:f570a02791afb3fe60e99cbb4993c2d1f8dc476d + last_write_checksum: sha1:a4767e8820ae840559fc55c8fcd346dea41a386e pristine_git_object: c9ef95a04c91c32e7a7973309e2174b7e776f099 src/mistralai/client/models/responseformat.py: id: 6ab8bc8d22c0 @@ -2798,11 +2552,11 @@ trackedFiles: pristine_git_object: 5899b0175cefd4159eb680a3715a72fa78577ba4 src/mistralai/client/models/responseformats.py: id: c4462a05fb08 - last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + last_write_checksum: sha1:863c7ec4c567d8f0c4e6305b47896424726e71be pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 src/mistralai/client/models/responsestartedevent.py: id: 24f54ee8b0f2 - last_write_checksum: sha1:5f7a4fad7c13f89b6e3672e422d5ef902aa5bf03 + last_write_checksum: sha1:1bd2a884b9f66eb811fc83d8c3644913dfa80ab1 pristine_git_object: dc6a10f91e2bb0d13a582ed03e7db2089b75bcf7 src/mistralai/client/models/responsevalidationerror.py: id: c244a88981e0 @@ -2810,7 +2564,7 @@ trackedFiles: pristine_git_object: bab5d0b70e0bb2ea567a16a1a7c5db839651836f src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: id: 6fefa90ca351 - last_write_checksum: sha1:c34e2f55663cafe353e628fbd978a6be7ca6a467 + last_write_checksum: sha1:f7308b269e12b2554a27de9d41312097d0d55d82 pristine_git_object: 7fdcd37d5879aaca158f459df830a5a4dc55bfa0 src/mistralai/client/models/retrievefileout.py: id: 8bb5859aa0d0 @@ -2850,11 +2604,11 @@ trackedFiles: pristine_git_object: 181b327ea73a9bcf9fb90f95633da71cee96e599 src/mistralai/client/models/ssetypes.py: id: 1733e4765106 - last_write_checksum: sha1:1901bf6feee92ac100113e0a98dc0abe6e769375 + last_write_checksum: 
sha1:8154966cda84ddd5225936ee47c87df1143ee1f1 pristine_git_object: 796f0327cbb1372c1b2a817a7db39f8f185a59be src/mistralai/client/models/systemmessage.py: id: 500ef6e85ba1 - last_write_checksum: sha1:0e8e34fa66e4bb8bf1128b3007ef72bf33690e1e + last_write_checksum: sha1:4ca4da49acae5fb508584b1776d368eba7d4a119 pristine_git_object: 9e01bc57bd17a5ecf6be5fee3383bbb9e03a8ab5 src/mistralai/client/models/systemmessagecontentchunks.py: id: 297e8905d5af @@ -2886,23 +2640,23 @@ trackedFiles: pristine_git_object: 2c7f6cbf6ebfbbdcce7d82b885b5e07b6b52d066 src/mistralai/client/models/toolchoiceenum.py: id: c7798801f860 - last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + last_write_checksum: sha1:d958ef93b303539226fdab0fd46c8ea21d24cea2 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 src/mistralai/client/models/toolexecutiondeltaevent.py: id: df8f17cf3e07 - last_write_checksum: sha1:32257ebf812efe05763df71e498018d53884a32d + last_write_checksum: sha1:96147badaad7eb961d224b29d9134dba8fc35f49 pristine_git_object: 0268e6a0d9b3c25afe1022e61a630e926a50f135 src/mistralai/client/models/toolexecutiondoneevent.py: id: 514fdee7d99f - last_write_checksum: sha1:e99be4db8d87bb3aa9383c062846d35923721292 + last_write_checksum: sha1:bc439993c647ba471b7f1581f72e094b99bd5c14 pristine_git_object: 854baee98a119caf237ca0f39e4ddd7a36577771 src/mistralai/client/models/toolexecutionentry.py: id: 76db69eebe41 - last_write_checksum: sha1:1577af968f800b28a3da2006c44016a901532591 + last_write_checksum: sha1:4fb31b58961ce5f43233d91fb6efb89c624fab44 pristine_git_object: 839709fb8ea63cc358de9f5e71180bf9e94cf5a5 src/mistralai/client/models/toolexecutionstartedevent.py: id: 40fadb8e49a1 - last_write_checksum: sha1:49922a41c52e7f25eab26c8a34ec481c319c62b4 + last_write_checksum: sha1:d71ec6e61c1a881be8e02853f1ba450c36ec16e3 pristine_git_object: 66438cfc33b171868f597ff3f80a82a40d1396f4 src/mistralai/client/models/toolfilechunk.py: id: 26c8aadf416a @@ -2910,7 +2664,7 @@ trackedFiles: pristine_git_object: 62b5ffeda19a7fa614ccc5e390450f2452dd119d src/mistralai/client/models/toolmessage.py: id: 15f1af161031 - last_write_checksum: sha1:47b4b3426ecde263ce4f2918ff98135952447b40 + last_write_checksum: sha1:58370491597186ddf08c8648f1e24abc9c852c26 pristine_git_object: eae2d2aef69dc4134f42714d69625e7b6c43e8c9 src/mistralai/client/models/toolreferencechunk.py: id: 822e9f3e70de @@ -2930,31 +2684,31 @@ trackedFiles: pristine_git_object: 24c0b92e424e91d40972c0826553a7d344a8f932 src/mistralai/client/models/transcriptionsegmentchunk.py: id: d1e6f3bdc74b - last_write_checksum: sha1:5f16b05debe943432b69d390844216a703adf71a + last_write_checksum: sha1:23714fcd3791d09a7cc9a1bddd2f2203861d1bce pristine_git_object: c89d84fcf3842da23e1f710309446b4c592ceeb3 src/mistralai/client/models/transcriptionstreamdone.py: id: 066a9158ed09 - last_write_checksum: sha1:1f9a29e826dcc91ed0c7f08b69aaa81987d810b7 + last_write_checksum: sha1:09bd7a12a1985d377883be53815f88195dcdce57 pristine_git_object: add17f562385c3befc2932b16448901154372ca6 src/mistralai/client/models/transcriptionstreamevents.py: id: b50b3d74f16f - last_write_checksum: sha1:38d2ff40e9d4f5d09fa24eef0925d306cf434bf0 + last_write_checksum: sha1:651ae56098858fe8103ebd280bbdf2f74550794c pristine_git_object: caaf943a4662ecccab96183f63c226eaefee2882 src/mistralai/client/models/transcriptionstreameventtypes.py: id: 6f71f6fbf4c5 - last_write_checksum: sha1:38b7334aebf400e1abb2b20b0f2890880f0fc2f7 + last_write_checksum: sha1:d7671637063c19222c20b8334abf92abe3d30517 pristine_git_object: 
4a910f0abca2912746cac60fd5a16bd5464f2457 src/mistralai/client/models/transcriptionstreamlanguage.py: id: e94333e4bc27 - last_write_checksum: sha1:9427411056a6239956ed3963af53c452e6fc4705 + last_write_checksum: sha1:7da587e67d635164bb986a3151a43b9a71b28c4d pristine_git_object: b47024adfca2d8da6f1f01ce573bcc339cbbc63a src/mistralai/client/models/transcriptionstreamsegmentdelta.py: id: c0a882ce57e5 - last_write_checksum: sha1:3cc8664a90c67c412fc3c58e6841571c476697ea + last_write_checksum: sha1:91631a724a84abf4fd603ba7a7630b5e7d970944 pristine_git_object: 7cfffb63f31d10a247e066c8f422e4f6af2cf489 src/mistralai/client/models/transcriptionstreamtextdelta.py: id: 6086dc081147 - last_write_checksum: sha1:d68e4b6cefa3a1492b461fbe17cff5c5216b58f5 + last_write_checksum: sha1:1c065d9a2874c4b315fe3cd191f94ef3e8f1cc43 pristine_git_object: ce279cf67ffc4e225ce37490f4ffd0c0d64fe993 src/mistralai/client/models/unarchiveftmodelout.py: id: 9dbc3bfb71ed @@ -2974,7 +2728,7 @@ trackedFiles: pristine_git_object: f1186d97357320f4bfc9d3f2a626f58d2b1a38d0 src/mistralai/client/models/usermessage.py: id: cb583483acf4 - last_write_checksum: sha1:1c15371710f18d7ed8f612cc450f4873f83f1eb9 + last_write_checksum: sha1:1e33aea6971835069dc9c862351d507f48d4ff8d pristine_git_object: 8d92cea803368e996d68dc2f3a2dadd1d06a4675 src/mistralai/client/models/validationerror.py: id: 15df3c7368ab @@ -2982,23 +2736,23 @@ trackedFiles: pristine_git_object: 352409be88a1175073e5438d6da86fc9a54896fc src/mistralai/client/models/wandbintegration.py: id: 4823c1e80942 - last_write_checksum: sha1:a76661e93fd3b6d8a3d210ef610a40ff1da203f7 + last_write_checksum: sha1:b33912c4e08c07b0139cc3c31f93e899f797b5f2 pristine_git_object: 89489fb4527c6515a609bcb533ef59ab516c7a38 src/mistralai/client/models/wandbintegrationout.py: id: 6b103d74195c - last_write_checksum: sha1:e648c37d559f8cec36b3c8e06979d8ac053a2ad6 + last_write_checksum: sha1:f64af59d7fe3d068e185776b01d43b7fdab1f129 pristine_git_object: a7f9afeb6683a173115371a686af5f95e2d29056 src/mistralai/client/models/websearchpremiumtool.py: id: bfe88af887e3 - last_write_checksum: sha1:af6e2fae78c2f22b98d58ab55b365d1688dba8cb + last_write_checksum: sha1:689087bc6c49bbc8b286e5b0155a6e5f6a1dc47d pristine_git_object: 8d2d4b5dfea50a34ac744181790bf5db84809b1c src/mistralai/client/models/websearchtool.py: id: 26b0903423e5 - last_write_checksum: sha1:49295d52d59e914620dedf9d22fb2290896039cf + last_write_checksum: sha1:93015f750a125a8297f9455278ebe482794ba958 pristine_git_object: ba4cc09f84faebb438a631db6ac328fea2ced609 src/mistralai/client/models_.py: id: 1d277958a843 - last_write_checksum: sha1:8f76c2395cb534e94366033007df24bf56c43ac7 + last_write_checksum: sha1:987921077f5b5535c39a21216585fc1bf9aa8811 pristine_git_object: 5ef9da096e58023aaa582f31717b4ee7a4b720b0 src/mistralai/client/ocr.py: id: 2f804a12fc62 diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index b47a192d..20576b9d 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -8,11 +8,13 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false + nameResolutionFeb2025: true parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false - sharedErrorComponentsApr2025: false + securityFeb2025: true + sharedErrorComponentsApr2025: true + methodSignaturesApr2024: true + sharedNestedComponentsJan2026: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -26,7 +28,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: 
false python: - version: 2.0.0a1 + version: 2.0.0a2 additionalDependencies: dev: pytest: ^8.2.2 @@ -47,9 +49,12 @@ python: envVarPrefix: MISTRAL fixFlags: responseRequiredSep2024: true + flatAdditionalProperties: true flattenGlobalSecurity: true flattenRequests: true flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only imports: option: openapi paths: @@ -68,7 +73,7 @@ python: outputModelSuffix: output packageManager: uv packageName: mistralai - preApplyUnionDiscriminators: false + preApplyUnionDiscriminators: true pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat diff --git a/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock b/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock new file mode 100644 index 00000000..d6937e41 --- /dev/null +++ b/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock @@ -0,0 +1,799 @@ +src/mistralai/client/_hooks/sdkhooks.py +docs/models/messageoutputeventcontent.md +docs/models/classificationresponse.md +docs/models/tooltypes.md +docs/models/toolexecutionstartedevent.md +docs/models/unarchiveftmodeloutobject.md +src/mistralai/client/models/conversationrequest.py +docs/models/agentconversationobject.md +src/mistralai/client/models/listlibraryout.py +docs/models/agentsapiv1agentsupdaterequest.md +src/mistralai/client/models/agentaliasresponse.py +docs/models/embeddingresponse.md +docs/models/agentsapiv1agentsgetversionrequest.md +src/mistralai/client/models/libraryin.py +docs/models/agentscompletionstreamrequest.md +docs/models/function.md +docs/models/agentsapiv1agentsgetagentversion.md +docs/models/imagegenerationtool.md +docs/models/classifiertargetin.md +src/mistralai/client/models/realtimetranscriptionsession.py +src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py +docs/models/agentsapiv1conversationsgetrequest.md +docs/models/messageoutputentry.md +docs/models/classificationrequestinputs.md +docs/models/chatcompletionrequestmessage.md +docs/models/thinking.md +src/mistralai/client/models/conversationinputs.py +docs/models/functionresultentry.md +docs/models/fimcompletionstreamrequeststop.md +docs/models/librariesupdatev1request.md +src/mistralai/client/models/agents_api_v1_conversations_appendop.py +docs/models/paginationinfo.md +docs/models/agenthandoffentry.md +docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md +docs/models/moderationresponse.md +docs/models/toolexecutionentryobject.md +docs/models/completionresponsestreamchoicefinishreason.md +docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +docs/models/modelcapabilities.md +docs/models/responseformats.md +docs/models/agentupdaterequest.md +src/mistralai/client/models/transcriptionstreamsegmentdelta.py +docs/models/sharingin.md +docs/models/responseformat.md +docs/models/imageurl.md +src/mistralai/client/models/processingstatusout.py +docs/models/messageoutputevent.md +src/mistralai/client/models/conversationusageinfo.py +src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +src/mistralai/client/models/agents_api_v1_agents_get_versionop.py +src/mistralai/client/models/libraries_documents_get_v1op.py +docs/models/attributes.md +docs/models/agentscompletionrequeststop.md +src/mistralai/client/models/moderationresponse.py +src/mistralai/client/models/classifiertrainingparametersin.py +docs/models/audiochunk.md +src/mistralai/client/models/ocrrequest.py +src/mistralai/client/models/file.py 
+src/mistralai/client/models/ocrresponse.py
+src/mistralai/client/models/classifiertargetin.py
+docs/models/agentconversationagentversion.md
+docs/models/classificationtargetresult.md
+docs/models/tableformat.md
+docs/models/classifiertrainingparameters.md
+src/mistralai/client/models/shareenum.py
+.vscode/settings.json
+docs/models/messageoutputentrycontent.md
+py.typed
+docs/models/agentscompletionrequest.md
+docs/models/completionjoboutrepository.md
+src/mistralai/client/models/batchrequest.py
+docs/models/entry.md
+src/mistralai/client/models/modelcapabilities.py
+docs/models/file.md
+src/mistralai/client/models/mistralpromptmode.py
+scripts/publish.sh
+docs/models/agentscompletionstreamrequestmessage.md
+docs/models/messageinputentrytype.md
+src/mistralai/client/__init__.py
+src/mistralai/client/_version.py
+src/mistralai/client/models/ocrpageobject.py
+docs/models/ocrimageobject.md
+src/mistralai/client/basesdk.py
+docs/models/ocrpagedimensions.md
+src/mistralai/client/httpclient.py
+docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md
+src/mistralai/client/py.typed
+src/mistralai/client/types/__init__.py
+docs/models/agentsapiv1agentsupdateversionrequest.md
+src/mistralai/client/types/basemodel.py
+src/mistralai/client/utils/__init__.py
+src/mistralai/client/utils/annotations.py
+src/mistralai/client/utils/datetimes.py
+src/mistralai/client/utils/enums.py
+src/mistralai/client/models/inputs.py
+src/mistralai/client/utils/eventstreaming.py
+src/mistralai/client/utils/forms.py
+src/mistralai/client/utils/headers.py
+src/mistralai/client/models/legacyjobmetadataout.py
+src/mistralai/client/utils/logger.py
+src/mistralai/client/utils/metadata.py
+src/mistralai/client/utils/queryparams.py
+src/mistralai/client/utils/requestbodies.py
+src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py
+docs/models/toolexecutionentryname.md
+src/mistralai/client/utils/retries.py
+src/mistralai/client/utils/security.py
+src/mistralai/client/models/toolfilechunk.py
+src/mistralai/client/utils/serializers.py
+src/mistralai/client/models/transcriptionstreamdone.py
+src/mistralai/client/utils/unmarshal_json_response.py
+src/mistralai/client/utils/url.py
+src/mistralai/client/utils/values.py
+src/mistralai/client/models/responsevalidationerror.py
+src/mistralai/client/models/retrievefileout.py
+src/mistralai/client/models/mistralerror.py
+docs/models/apiendpoint.md
+src/mistralai/client/models/sdkerror.py
+docs/models/jobsout.md
+src/mistralai/client/models/no_response_error.py
+docs/models/conversationrestartstreamrequesthandoffexecution.md
+docs/models/functiontool.md
+docs/models/agentsapiv1conversationsappendstreamrequest.md
+docs/models/agenthandoffentryobject.md
+docs/models/transcriptionstreameventtypes.md
+docs/models/messageoutputeventrole.md
+src/mistralai/client/models/modellist.py
+docs/models/responseretrievemodelv1modelsmodelidget.md
+docs/models/referencechunktype.md
+docs/models/chatclassificationrequest.md
+src/mistralai/client/models/responseformats.py
+docs/models/librariesdocumentsdeletev1request.md
+src/mistralai/client/models/conversationresponse.py
+src/mistralai/client/models/completionargsstop.py
+src/mistralai/client/models/contentchunk.py
+docs/models/classifierdetailedjoboutstatus.md
+docs/models/listlibraryout.md
+docs/models/transcriptionstreamevents.md
+src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py
+docs/models/chatcompletionrequeststop.md
+src/mistralai/client/models/libraries_update_v1op.py
+src/mistralai/client/models/websearchtool.py
+src/mistralai/client/models/classifiertrainingparameters.py
+docs/models/validationerror.md
+src/mistralai/client/models/documentlibrarytool.py
+src/mistralai/client/models/responsestartedevent.py
+docs/models/document.md
+src/mistralai/client/models/filesignedurl.py
+src/mistralai/client/models/fimcompletionresponse.py
+docs/models/agentscompletionstreamrequeststop.md
+docs/models/agenthandoffentrytype.md
+docs/models/conversationmessages.md
+src/mistralai/client/models/responsedoneevent.py
+docs/models/completionresponsestreamchoice.md
+docs/models/fimcompletionresponse.md
+src/mistralai/client/models/unarchiveftmodelout.py
+src/mistralai/client/conversations.py
+src/mistralai/client/models/toolexecutionstartedevent.py
+src/mistralai/client/models/jsonschema.py
+docs/models/completionftmodelout.md
+src/mistralai/client/models/fimcompletionstreamrequest.py
+docs/models/chatcompletionrequesttoolchoice.md
+src/mistralai/client/models/tooltypes.py
+src/mistralai/client/models/functionname.py
+docs/models/functionresultentryobject.md
+docs/models/classifierjobout.md
+src/mistralai/client/models/listfilesout.py
+src/mistralai/client/models/agents_api_v1_agents_listop.py
+src/mistralai/client/models/imageurl.py
+src/mistralai/client/models/chatcompletionchoice.py
+src/mistralai/client/sdk.py
+docs/models/conversationrequesttool.md
+docs/models/chatcompletionrequest.md
+docs/models/librariesdeletev1request.md
+src/mistralai/client/models/chatcompletionresponse.py
+docs/models/toolreferencechunktool.md
+src/mistralai/client/_hooks/types.py
+src/mistralai/client/models/agents_api_v1_conversations_deleteop.py
+docs/models/systemmessagecontentchunks.md
+src/mistralai/client/models/sharingin.py
+docs/models/completionjoboutobject.md
+docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md
+src/mistralai/client/models/functionresultentry.py
+docs/models/agentsapiv1conversationsdeleterequest.md
+docs/models/githubrepositoryout.md
+docs/models/retrievemodelv1modelsmodelidgetrequest.md
+docs/models/conversationstreamrequest.md
+docs/models/agentsapiv1conversationsmessagesrequest.md
+docs/models/sharingout.md
+docs/models/archiveftmodelout.md
+docs/models/listdocumentout.md
+docs/models/toolreferencechunk.md
+docs/models/instructrequestinputs.md
+src/mistralai/client/models/deltamessage.py
+src/mistralai/client/models/tool.py
+src/mistralai/client/beta_agents.py
+src/mistralai/client/models/toolcall.py
+docs/models/jobin.md
+src/mistralai/client/models/libraries_documents_upload_v1op.py
+src/mistralai/client/models/toolexecutiondoneevent.py
+docs/models/conversationrequestagentversion.md
+docs/models/listsharingout.md
+docs/models/completiondetailedjoboutrepository.md
+docs/models/completionftmodeloutobject.md
+src/mistralai/client/models/agentcreationrequest.py
+docs/models/functioncallentry.md
+src/mistralai/client/models/agents_api_v1_conversations_getop.py
+src/mistralai/client/models/filepurpose.py
+src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py
+src/mistralai/client/models/jobsout.py
+docs/models/agentsapiv1conversationsappendrequest.md
+docs/models/jobsapiroutesbatchgetbatchjobsrequest.md
+src/mistralai/client/models/audiotranscriptionrequest.py
+src/mistralai/client/models/agents_api_v1_agents_update_versionop.py
+src/mistralai/client/models/prediction.py
+docs/models/conversationinputs.md
+docs/models/agenthandoffdoneevent.md
+docs/models/finetuneablemodeltype.md
+src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py
+src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py
+docs/models/conversationrestartrequest.md
+src/mistralai/client/models/ocrimageobject.py
+docs/models/security.md
+src/mistralai/client/models/libraryinupdate.py
+docs/models/fimcompletionrequest.md
+docs/models/ocrusageinfo.md
+docs/models/completionjoboutintegration.md
+src/mistralai/client/libraries.py
+src/mistralai/client/models/wandbintegration.py
+src/mistralai/client/models/ocrpagedimensions.py
+src/mistralai/client/models/jobin.py
+docs/models/conversationrestartstreamrequestagentversion.md
+src/mistralai/client/models/libraries_documents_reprocess_v1op.py
+docs/models/agentsapiv1agentsgetrequest.md
+src/mistralai/client/models/paginationinfo.py
+src/mistralai/client/models/jobmetadataout.py
+docs/models/assistantmessage.md
+src/mistralai/client/models/conversationappendstreamrequest.py
+docs/models/librariesdocumentsgettextcontentv1request.md
+docs/models/realtimetranscriptionerror.md
+src/mistralai/client/models/completiondetailedjobout.py
+src/mistralai/client/fine_tuning_jobs.py
+src/mistralai/client/models/documentout.py
+docs/models/librariesgetv1request.md
+docs/models/referencechunk.md
+src/mistralai/client/models/completiontrainingparameters.py
+src/mistralai/client/agents.py
+src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py
+src/mistralai/client/models/toolchoice.py
+docs/models/requestsource.md
+docs/models/embeddingrequestinputs.md
+src/mistralai/client/models/imagegenerationtool.py
+src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py
+docs/models/jobsoutobject.md
+docs/models/librariesdocumentsreprocessv1request.md
+src/mistralai/client/models/audiotranscriptionrequeststream.py
+docs/models/tool.md
+src/mistralai/client/models/uploadfileout.py
+src/mistralai/client/models/timestampgranularity.py
+src/mistralai/client/models/metricout.py
+docs/models/jobmetadataout.md
+src/mistralai/client/models/files_api_routes_upload_fileop.py
+docs/models/chatmoderationrequestinputs1.md
+src/mistralai/client/models/transcriptionstreameventtypes.py
+src/mistralai/client/models/completionchunk.py
+src/mistralai/client/models/conversationevents.py
+docs/models/agent.md
+src/mistralai/client/models/documenttextcontent.py
+docs/models/embeddingresponsedata.md
+docs/models/codeinterpretertool.md
+src/mistralai/client/models/deletemodelout.py
+docs/models/agenttool.md
+src/mistralai/client/models/completionresponsestreamchoice.py
+src/mistralai/client/models/audiochunk.py
+docs/models/functioncallevent.md
+docs/models/transcriptionstreamtextdelta.md
+docs/models/completiontrainingparametersin.md
+docs/models/conversationappendrequesthandoffexecution.md
+docs/models/chatcompletionchoicefinishreason.md
+src/mistralai/client/models/libraries_documents_get_status_v1op.py
+docs/models/libraryinupdate.md
+src/mistralai/client/models/modelconversation.py
+docs/models/completiondetailedjobout.md
+docs/models/realtimetranscriptionsessioncreated.md
+docs/models/classifierjoboutobject.md
+docs/models/filesapiroutesretrievefilerequest.md
+src/mistralai/client/models/trainingfile.py
+docs/models/multipartbodyparams.md
+src/mistralai/client/models/libraries_delete_v1op.py
+docs/models/sampletype.md
+src/mistralai/client/models/functioncallevent.py
+src/mistralai/client/models/imageurlchunk.py
+src/mistralai/client/models/libraries_documents_delete_v1op.py
+src/mistralai/client/models/agentconversation.py
+src/mistralai/client/models/chatclassificationrequest.py
+docs/models/ftmodelcapabilitiesout.md
+docs/models/classifierftmodelout.md
+docs/models/deletemodelv1modelsmodeliddeleterequest.md
+docs/models/messageoutputentryrole.md
+docs/models/eventout.md
+docs/models/systemmessage.md
+src/mistralai/client/models/sampletype.py
+docs/models/conversationevents.md
+docs/models/fileschema.md
+src/mistralai/client/models/agentscompletionrequest.py
+src/mistralai/client/models/chatmoderationrequest.py
+src/mistralai/client/models/classifierftmodelout.py
+docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md
+docs/models/chatcompletionresponse.md
+src/mistralai/client/models/toolmessage.py
+src/mistralai/client/accesses.py
+src/mistralai/client/models/source.py
+docs/models/documenturlchunk.md
+docs/models/updateftmodelin.md
+src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py
+docs/models/toolreferencechunktype.md
+src/mistralai/client/models/files_api_routes_get_signed_urlop.py
+src/mistralai/client/models/responseerrorevent.py
+docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md
+docs/models/thinkchunk.md
+docs/models/agentcreationrequesttool.md
+docs/models/completiondetailedjoboutobject.md
+src/mistralai/client/models/filechunk.py
+docs/models/agentcreationrequest.md
+docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md
+docs/models/utils/retryconfig.md
+docs/models/loc.md
+docs/models/filesignedurl.md
+src/mistralai/client/models/embeddingdtype.py
+docs/models/chatcompletionstreamrequest.md
+docs/models/audioformat.md
+docs/models/transcriptionstreamsegmentdelta.md
+docs/models/inputsmessage.md
+docs/models/instructrequest.md
+src/mistralai/client/models/batchjobout.py
+docs/models/classifiertargetout.md
+docs/models/filesapiroutesgetsignedurlrequest.md
+docs/models/conversationappendrequest.md
+docs/models/legacyjobmetadataoutobject.md
+src/mistralai/client/models/messageoutputentry.py
+docs/models/messageinputentryobject.md
+src/mistralai/client/models/embeddingresponse.py
+src/mistralai/client/models/documenturlchunk.py
+docs/models/usermessage.md
+src/mistralai/client/models/apiendpoint.py
+src/mistralai/client/models/batchjobstatus.py
+docs/models/jobsapiroutesbatchgetbatchjobrequest.md
+docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md
+docs/models/wandbintegration.md
+docs/models/conversationmessagesobject.md
+docs/models/utils/retryconfig.md
+docs/models/fimcompletionstreamrequest.md
+docs/models/batchrequest.md
+docs/models/agentsapiv1conversationslistresponse.md
+docs/models/conversationhistory.md
+docs/sdks/agents/README.md
+docs/models/transcriptionresponse.md
+src/mistralai/client/models/files_api_routes_download_fileop.py
+src/mistralai/client/models/embeddingrequest.py
+src/mistralai/client/models/transcriptionresponse.py
+src/mistralai/client/models/libraries_documents_list_v1op.py
+src/mistralai/client/models/githubrepositoryin.py
+docs/models/librariesdocumentsgetstatusv1request.md
+docs/models/modelconversationtool.md
+.gitattributes
+docs/models/functioncallentryarguments.md
+src/mistralai/client/models/ftclassifierlossfunction.py
+src/mistralai/client/batch.py
+docs/models/classificationrequest.md
+src/mistralai/client/models/realtimetranscriptionerrordetail.py
+docs/models/hyperparameters.md
+docs/models/utils/retryconfig.md
+docs/models/moderationobject.md
+docs/models/classifierjoboutstatus.md
+docs/models/agentupdaterequesttool.md
+docs/models/chatcompletionstreamrequestmessage.md
+docs/models/completiondetailedjoboutintegration.md
+src/mistralai/client/models/transcriptionstreamtextdelta.py
+src/mistralai/client/models/libraries_get_v1op.py
+docs/models/agentscompletionrequesttoolchoice.md
+src/mistralai/client/models/deletefileout.py
+docs/models/completionevent.md
+src/mistralai/client/chat.py
+src/mistralai/client/models/completiontrainingparametersin.py
+docs/models/librariesdocumentsupdatev1request.md
+docs/models/instructrequestmessage.md
+src/mistralai/client/models/documentupdatein.py
+docs/models/toolfilechunk.md
+src/mistralai/client/models/messageinputcontentchunks.py
+src/mistralai/client/models/files_api_routes_delete_fileop.py
+docs/models/utils/retryconfig.md
+docs/models/assistantmessagerole.md
+docs/sdks/transcriptions/README.md
+docs/models/librariessharedeletev1request.md
+src/mistralai/client/models/moderationobject.py
+docs/models/unarchiveftmodelout.md
+src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py
+docs/models/messageoutputentrytype.md
+docs/models/functioncall.md
+docs/models/toolexecutiondeltaevent.md
+src/mistralai/client/models/realtimetranscriptionerror.py
+docs/models/agentsapiv1agentslistrequest.md
+src/mistralai/client/models/websearchpremiumtool.py
+src/mistralai/client/models/realtimetranscriptionsessionupdated.py
+src/mistralai/client/models/libraries_documents_get_text_content_v1op.py
+docs/models/agentscompletionstreamrequesttoolchoice.md
+docs/models/textchunk.md
+docs/models/toolcall.md
+docs/models/assistantmessagecontent.md
+src/mistralai/client/models/chatcompletionrequest.py
+src/mistralai/client/models/usermessage.py
+docs/models/outputcontentchunks.md
+docs/models/librariesdocumentsuploadv1request.md
+docs/models/entitytype.md
+src/mistralai/client/models/basemodelcard.py
+docs/models/toolexecutionentrytype.md
+docs/models/shareenum.md
+docs/models/imageurlunion.md
+docs/models/conversationappendstreamrequest.md
+docs/models/websearchpremiumtool.md
+docs/models/utils/retryconfig.md
+docs/models/fimcompletionrequeststop.md
+src/mistralai/client/models/classificationtargetresult.py
+src/mistralai/client/audio.py
+docs/models/chatmoderationrequestinputs3.md
+docs/models/response.md
+src/mistralai/client/models/referencechunk.py
+docs/models/jobinrepository.md
+src/mistralai/client/models/files_api_routes_retrieve_fileop.py
+src/mistralai/client/sdkconfiguration.py
+src/mistralai/client/models/agents_api_v1_conversations_messagesop.py
+src/mistralai/client/models/instructrequest.py
+src/mistralai/client/models/classifiertargetout.py
+docs/models/classifierdetailedjoboutobject.md
+src/mistralai/client/models/inputentries.py
+src/mistralai/client/models/toolchoiceenum.py
+docs/models/chatcompletionstreamrequesttoolchoice.md
+docs/models/agentconversation.md
+docs/models/utils/retryconfig.md
+src/mistralai/client/models/functioncall.py
+docs/models/mistralpromptmode.md
+docs/models/conversationresponseobject.md
+src/mistralai/client/models/ocrtableobject.py
+src/mistralai/client/models/toolexecutionentry.py
+docs/models/classifierdetailedjobout.md
+docs/models/conversationresponse.md
+docs/models/agentsapiv1agentslistversionaliasesrequest.md
+docs/models/conversationeventsdata.md
+src/mistralai/client/models/ocrusageinfo.py
+src/mistralai/client/models/ftmodelcard.py
+src/mistralai/client/models/libraries_share_list_v1op.py
+docs/models/modellistdata.md
+docs/models/messageoutputcontentchunks.md
+docs/models/modelconversation.md
+docs/models/batchjobstatus.md
+docs/models/encodingformat.md
+docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md
+docs/models/utils/retryconfig.md
+docs/models/modellist.md
+docs/models/textchunktype.md
+docs/models/completionargs.md
+docs/models/agenthandoffstartedevent.md
+docs/models/basemodelcard.md
+src/mistralai/client/models/classifierjobout.py
+docs/models/batchjobout.md
+docs/models/conversationstreamrequestagentversion.md
+docs/models/filesapiroutesdownloadfilerequest.md
+src/mistralai/client/models/fileschema.py
+docs/models/completiontrainingparameters.md
+docs/models/wandbintegrationout.md
+docs/models/agentobject.md
+src/mistralai/client/models/classifierdetailedjobout.py
+src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py
+USAGE.md
+docs/models/deltamessage.md
+docs/models/messageinputentry.md
+docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md
+docs/models/filechunk.md
+src/mistralai/client/models/agent.py
+src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py
+src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py
+docs/models/classifierftmodeloutobject.md
+src/mistralai/client/models/ftmodelcapabilitiesout.py
+src/mistralai/client/models/listsharingout.py
+src/mistralai/client/models/systemmessagecontentchunks.py
+src/mistralai/client/models/agents_api_v1_agents_updateop.py
+docs/models/retrievefileout.md
+src/mistralai/client/models/agents_api_v1_conversations_historyop.py
+src/mistralai/client/fim.py
+docs/models/embeddingdtype.md
+src/mistralai/client/models/conversationrestartstreamrequest.py
+src/mistralai/client/models/completionargs.py
+docs/models/toolexecutionstartedeventname.md
+src/mistralai/client/models/transcriptionstreamlanguage.py
+docs/models/librariessharelistv1request.md
+src/mistralai/client/fine_tuning.py
+docs/models/agentsapiv1conversationsrestartrequest.md
+docs/models/conversationrestartstreamrequest.md
+docs/models/transcriptionstreamlanguage.md
+docs/models/toolexecutiondoneeventname.md
+docs/models/classifierjoboutintegration.md
+docs/models/classifiertrainingparametersin.md
+src/mistralai/client/models/agentupdaterequest.py
+docs/models/agentscompletionrequestmessage.md
+docs/models/chatmoderationrequest.md
+docs/models/chatcompletionchoice.md
+docs/models/batchjoboutobject.md
+docs/models/toolchoiceenum.md
+docs/models/ocrrequest.md
+src/mistralai/client/models/updateftmodelin.py
+docs/models/classifierdetailedjoboutintegration.md
+src/mistralai/client/models/agenthandoffdoneevent.py
+src/mistralai/client/models/files_api_routes_list_filesop.py
+src/mistralai/client/ocr.py
+docs/models/embeddingrequest.md
+src/mistralai/client/models/conversationstreamrequest.py
+src/mistralai/client/models/thinkchunk.py
+docs/models/toolchoice.md
+docs/models/documentupload.md
+docs/models/imageurlchunktype.md
+docs/models/conversationrestartrequestagentversion.md
+docs/models/transcriptionstreamdone.md
+src/mistralai/client/models/libraryout.py
+src/mistralai/client/models/conversationappendrequest.py
+src/mistralai/client/models/audioformat.py
+docs/models/conversationhistoryobject.md
+docs/models/ftclassifierlossfunction.md
+docs/models/websearchtool.md
+docs/models/messageoutputentryobject.md
+src/mistralai/client/models/batchjobsout.py
+docs/models/agentsapiv1agentsdeleterequest.md
+docs/models/libraryout.md
+docs/models/batchjobsoutobject.md
+docs/models/functionresultentrytype.md
+src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py
+docs/models/completionjoboutstatus.md
+docs/models/documenttextcontent.md
+docs/models/legacyjobmetadataout.md
+docs/models/prediction.md
+src/mistralai/client/models_.py
+src/mistralai/client/models/sharingdelete.py
+src/mistralai/client/models/usageinfo.py
+docs/models/thinkchunktype.md
+docs/models/agentsapiv1conversationshistoryrequest.md
+src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py
+src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py
+src/mistralai/client/models/responseformat.py
+docs/models/builtinconnectors.md
+docs/models/realtimetranscriptionsession.md
+docs/models/documentlibrarytool.md
+docs/models/toolfilechunktool.md
+docs/models/messageinputcontentchunks.md
+src/mistralai/client/models/checkpointout.py
+src/mistralai/client/models/validationerror.py
+docs/models/utils/retryconfig.md
+docs/models/chatmoderationrequestinputs2.md
+docs/models/format_.md
+docs/sdks/batchjobs/README.md
+docs/models/agentsapiv1conversationsrestartstreamrequest.md
+docs/models/filesapiroutesdeletefilerequest.md
+docs/models/toolexecutionentry.md
+docs/models/metricout.md
+src/mistralai/client/models/completionjobout.py
+docs/models/deletefileout.md
+docs/models/functioncallentrytype.md
+docs/models/filepurpose.md
+docs/models/transcriptionsegmentchunk.md
+docs/models/usageinfo.md
+docs/models/responsev1conversationsget.md
+src/mistralai/client/models/ssetypes.py
+src/mistralai/client/models/audioencoding.py
+docs/models/librariesdocumentsgetsignedurlv1request.md
+docs/models/timestampgranularity.md
+docs/models/conversationrequesthandoffexecution.md
+src/mistralai/client/transcriptions.py
+src/mistralai/client/models/function.py
+src/mistralai/client/models/toolexecutiondeltaevent.py
+docs/models/conversationappendstreamrequesthandoffexecution.md
+docs/models/realtimetranscriptionerrordetail.md
+docs/models/toolexecutiondeltaeventname.md
+src/mistralai/client/models/__init__.py
+src/mistralai/client/models/codeinterpretertool.py
+docs/models/utils/retryconfig.md
+docs/models/completiondetailedjoboutstatus.md
+docs/models/librariesdocumentsgetv1request.md
+src/mistralai/client/models/messageoutputevent.py
+src/mistralai/client/models/agentscompletionstreamrequest.py
+src/mistralai/client/models/textchunk.py
+docs/models/conversationstreamrequesttool.md
+docs/models/systemmessagecontent.md
+docs/models/agentsapiv1conversationslistrequest.md
+docs/models/chatcompletionstreamrequeststop.md
+docs/models/responseerrorevent.md
+docs/models/usermessagecontent.md
+docs/models/audioencoding.md
+docs/models/messageinputentryrole.md
+docs/models/inputentries.md
+src/mistralai/client/models/agents_api_v1_conversations_restartop.py
+src/mistralai/client/models/messageentries.py
+docs/models/ocrpageobject.md
+src/mistralai/client/models/completionevent.py
+src/mistralai/client/models/batchjobin.py
+src/mistralai/client/models/requestsource.py
+src/mistralai/client/models/fimcompletionrequest.py
+docs/models/utils/retryconfig.md
+src/mistralai/client/models/sharingout.py
+docs/models/messageentries.md
+docs/models/jobsoutdata.md
+src/mistralai/client/batch_jobs.py
+src/mistralai/client/models/messageinputentry.py
+docs/models/uploadfileout.md
+src/mistralai/client/models/finetuneablemodeltype.py
+docs/models/documentupdatein.md
+docs/models/toolmessagecontent.md
+docs/models/utils/retryconfig.md
+docs/models/documentout.md
+docs/models/functionname.md
+src/mistralai/client/documents.py
+src/mistralai/client/models/realtimetranscriptionsessioncreated.py +docs/models/conversationstreamrequesthandoffexecution.md +docs/models/ocrresponse.md +src/mistralai/client/models/libraries_share_create_v1op.py +docs/models/functioncallentryobject.md +docs/models/httpvalidationerror.md +src/mistralai/client/models/agents_api_v1_agents_getop.py +docs/models/responsedoneevent.md +docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md +docs/models/utils/retryconfig.md +src/mistralai/client/models/completionftmodelout.py +docs/models/utils/retryconfig.md +src/mistralai/client/files.py +docs/models/batchjobsout.md +docs/models/audiotranscriptionrequeststream.md +src/mistralai/client/models/functioncallentryarguments.py +docs/models/responsestartedevent.md +src/mistralai/client/models/agents_api_v1_agents_deleteop.py +docs/models/utils/retryconfig.md +docs/models/completionchunk.md +src/mistralai/client/models/agents_api_v1_conversations_listop.py +src/mistralai/client/models/archiveftmodelout.py +docs/models/agentaliasresponse.md +docs/models/realtimetranscriptionsessionupdated.md +docs/models/batcherror.md +docs/models/contentchunk.md +docs/models/source.md +docs/models/utils/retryconfig.md +docs/models/toolexecutiondoneevent.md +docs/models/realtimetranscriptionerrordetailmessage.md +docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md +docs/models/sharingdelete.md +docs/models/agentsapiv1agentscreateorupdatealiasrequest.md +docs/models/completionjobout.md +docs/models/conversationrequest.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/betaagents/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/conversations/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/libraries/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/accesses/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/documents/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/chat/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/classifiers/README.md +docs/models/utils/retryconfig.md +docs/sdks/embeddings/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/files/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/fim/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md 
+docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/finetuningjobs/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/models/README.md +docs/models/utils/retryconfig.md +docs/sdks/ocr/README.md +docs/models/processingstatusout.md +docs/models/completionargsstop.md +docs/models/ocrtableobject.md +src/mistralai/client/models/assistantmessage.py +src/mistralai/client/models/libraries_documents_update_v1op.py +src/mistralai/client/models/agenthandoffstartedevent.py +src/mistralai/client/models/eventout.py +src/mistralai/client/models/toolreferencechunk.py +docs/models/githubrepositoryin.md +src/mistralai/client/models/messageoutputcontentchunks.py +src/mistralai/client/models/agenthandoffentry.py +docs/models/jsonschema.md +docs/models/conversationrestartrequesthandoffexecution.md +docs/models/listfilesout.md +src/mistralai/client/models/transcriptionstreamevents.py +docs/models/ftmodelcard.md +docs/models/jobinintegration.md +src/mistralai/client/models/conversationrestartrequest.py +src/mistralai/client/models/encodingformat.py +docs/models/deltamessagecontent.md +src/mistralai/client/models/outputcontentchunks.py +docs/models/toolfilechunktype.md +src/mistralai/client/_hooks/__init__.py +src/mistralai/client/models/entitytype.py +docs/models/deletemodelout.md +src/mistralai/client/embeddings.py +docs/models/documenturlchunktype.md +docs/models/batchjobin.md +src/mistralai/client/models/wandbintegrationout.py +docs/models/transcriptionstreameventsdata.md +src/mistralai/client/models/classificationresponse.py +docs/models/trainingfile.md +src/mistralai/client/models/transcriptionsegmentchunk.py +docs/models/audiotranscriptionrequest.md +src/mistralai/client/models/githubrepositoryout.py +src/mistralai/client/models/functiontool.py +docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +docs/models/conversationusageinfo.md +docs/models/ssetypes.md +src/mistralai/client/models/listdocumentout.py +docs/models/libraryin.md +src/mistralai/client/models/libraries_share_delete_v1op.py +src/mistralai/client/models/systemmessage.py +src/mistralai/client/models/chatcompletionstreamrequest.py +src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py +docs/models/filesapirouteslistfilesrequest.md +docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md +src/mistralai/client/models/security.py +docs/models/modelconversationobject.md +src/mistralai/client/models/conversationmessages.py +docs/models/output.md +src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py +src/mistralai/client/models/classificationrequest.py +docs/models/librariesdocumentslistv1request.md +docs/models/toolmessage.md +docs/models/agentsapiv1agentslistversionsrequest.md +src/mistralai/client/models/embeddingresponsedata.py +src/mistralai/client/models/conversationhistory.py +docs/models/librariessharecreatev1request.md +docs/models/messageinputentrycontent.md +src/mistralai/client/models/functioncallentry.py +src/mistralai/client/models/builtinconnectors.py +src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py +src/mistralai/client/models/httpvalidationerror.py +src/mistralai/client/classifiers.py +docs/models/transcriptionsegmentchunktype.md +docs/models/arguments.md +docs/models/checkpointout.md +src/mistralai/client/beta.py 
+docs/models/archiveftmodeloutobject.md +docs/models/jobsapiroutesbatchcancelbatchjobrequest.md +docs/models/imageurlchunk.md +src/mistralai/client/models/batcherror.py +docs/models/inputs.md diff --git a/MIGRATION.md b/MIGRATION.md index 4ab7f2ff..5fb16739 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -35,10 +35,22 @@ from mistralai.client.types import BaseModel ### What Stays the Same -- All method names and signatures remain identical - The `Mistral` client API is unchanged - All models (`UserMessage`, `AssistantMessage`, etc.) work the same way +### Type Name Changes + +Some type names have been updated for clarity and consistency: + +| Old Name | New Name | +|---|---| +| `Tools` | `ConversationRequestTool` | +| `ToolsTypedDict` | `ConversationRequestToolTypedDict` | +| `HandoffExecution` | `ConversationRequestHandoffExecution` | +| `AgentVersion` | `ConversationRequestAgentVersion` | + +Enums now accept unknown values for forward compatibility with API changes. + --- ## Migrating from v0.x to v1.x diff --git a/README.md b/README.md index 129e8ee0..2f31ccf2 100644 --- a/README.md +++ b/README.md @@ -458,25 +458,25 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription * [stream](docs/sdks/transcriptions/README.md#stream) - Create Streaming Transcription (SSE) -### [Batch.Jobs](docs/sdks/mistraljobs/README.md) - -* [list](docs/sdks/mistraljobs/README.md#list) - Get Batch Jobs -* [create](docs/sdks/mistraljobs/README.md#create) - Create Batch Job -* [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job -* [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job - -### [Beta.Agents](docs/sdks/mistralagents/README.md) - -* [create](docs/sdks/mistralagents/README.md#create) - Create a agent that can be used within a conversation. -* [list](docs/sdks/mistralagents/README.md#list) - List agent entities. -* [get](docs/sdks/mistralagents/README.md#get) - Retrieve an agent entity. -* [update](docs/sdks/mistralagents/README.md#update) - Update an agent entity. -* [delete](docs/sdks/mistralagents/README.md#delete) - Delete an agent entity. -* [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. -* [list_versions](docs/sdks/mistralagents/README.md#list_versions) - List all versions of an agent. -* [get_version](docs/sdks/mistralagents/README.md#get_version) - Retrieve a specific version of an agent. -* [create_version_alias](docs/sdks/mistralagents/README.md#create_version_alias) - Create or update an agent version alias. -* [list_version_aliases](docs/sdks/mistralagents/README.md#list_version_aliases) - List all aliases for an agent. +### [Batch.Jobs](docs/sdks/batchjobs/README.md) + +* [list](docs/sdks/batchjobs/README.md#list) - Get Batch Jobs +* [create](docs/sdks/batchjobs/README.md#create) - Create Batch Job +* [get](docs/sdks/batchjobs/README.md#get) - Get Batch Job +* [cancel](docs/sdks/batchjobs/README.md#cancel) - Cancel Batch Job + +### [Beta.Agents](docs/sdks/betaagents/README.md) + +* [create](docs/sdks/betaagents/README.md#create) - Create an agent that can be used within a conversation. +* [list](docs/sdks/betaagents/README.md#list) - List agent entities. +* [get](docs/sdks/betaagents/README.md#get) - Retrieve an agent entity. +* [update](docs/sdks/betaagents/README.md#update) - Update an agent entity. +* [delete](docs/sdks/betaagents/README.md#delete) - Delete an agent entity. 
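To make the MIGRATION.md "Type Name Changes" table and the open-enum note above concrete, here is a minimal migration sketch. It assumes the renamed union types are importable from `mistralai.models`; the `on_job_status` helper and its status values are illustrative, not taken from this patch.

```python
# Minimal migration sketch for the type renames listed in MIGRATION.md.

# Before this change (old names):
#   from mistralai.models import Tools, ToolsTypedDict, HandoffExecution, AgentVersion

# After this change (new names, per the Type Name Changes table):
from mistralai.models import (
    ConversationRequestTool,              # was: Tools
    ConversationRequestToolTypedDict,     # was: ToolsTypedDict
    ConversationRequestHandoffExecution,  # was: HandoffExecution
    ConversationRequestAgentVersion,      # was: AgentVersion
)

# Enums now accept unknown values for forward compatibility, so keep a
# fallback branch rather than assuming membership in the known set.
# (Field and values below are hypothetical, for illustration only.)
def on_job_status(status: str) -> None:
    known = {"QUEUED", "RUNNING", "SUCCESS", "FAILED"}
    if status in known:
        print(f"known status: {status}")
    else:
        # A newer API version may return values this SDK does not enumerate;
        # with open enums these pass through instead of failing validation.
        print(f"unrecognized status, ignoring: {status}")
```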
+* [update_version](docs/sdks/betaagents/README.md#update_version) - Update an agent version. +* [list_versions](docs/sdks/betaagents/README.md#list_versions) - List all versions of an agent. +* [get_version](docs/sdks/betaagents/README.md#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](docs/sdks/betaagents/README.md#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](docs/sdks/betaagents/README.md#list_version_aliases) - List all aliases for an agent. ### [Beta.Conversations](docs/sdks/conversations/README.md) @@ -549,13 +549,13 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [complete](docs/sdks/fim/README.md#complete) - Fim Completion * [stream](docs/sdks/fim/README.md#stream) - Stream fim completion -### [FineTuning.Jobs](docs/sdks/jobs/README.md) +### [FineTuning.Jobs](docs/sdks/finetuningjobs/README.md) -* [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs -* [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job -* [get](docs/sdks/jobs/README.md#get) - Get Fine Tuning Job -* [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job -* [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job +* [list](docs/sdks/finetuningjobs/README.md#list) - Get Fine Tuning Jobs +* [create](docs/sdks/finetuningjobs/README.md#create) - Create Fine Tuning Job +* [get](docs/sdks/finetuningjobs/README.md#get) - Get Fine Tuning Job +* [cancel](docs/sdks/finetuningjobs/README.md#cancel) - Cancel Fine Tuning Job +* [start](docs/sdks/finetuningjobs/README.md#start) - Start Fine Tuning Job ### [Models](docs/sdks/models/README.md) diff --git a/docs/models/agent.md b/docs/models/agent.md index ee054dd3..bd143350 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentTools](../models/agenttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.AgentTool](../models/agenttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `model` | *str* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md index afc27d3b..6a24c00b 100644 --- a/docs/models/agentcreationrequest.md +++ b/docs/models/agentcreationrequest.md @@ -3,13 +3,13 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTools](../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentCreationRequestTool](../models/agentcreationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequesttool.md b/docs/models/agentcreationrequesttool.md new file mode 100644 index 00000000..b3bd7fa3 --- /dev/null +++ b/docs/models/agentcreationrequesttool.md @@ -0,0 +1,41 @@ +# AgentCreationRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/agenthandoffdoneevent.md b/docs/models/agenthandoffdoneevent.md index c0039f41..6bfcc3d8 100644 --- a/docs/models/agenthandoffdoneevent.md +++ b/docs/models/agenthandoffdoneevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `type` | [Optional[models.AgentHandoffDoneEventType]](../models/agenthandoffdoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["agent.handoff.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffdoneeventtype.md b/docs/models/agenthandoffdoneeventtype.md deleted file mode 100644 index c864ce43..00000000 --- a/docs/models/agenthandoffdoneeventtype.md +++ 
/dev/null @@ -1,8 +0,0 @@ -# AgentHandoffDoneEventType - - -## Values - -| Name | Value | -| -------------------- | -------------------- | -| `AGENT_HANDOFF_DONE` | agent.handoff.done | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedevent.md b/docs/models/agenthandoffstartedevent.md index 035cd02a..518b5a0c 100644 --- a/docs/models/agenthandoffstartedevent.md +++ b/docs/models/agenthandoffstartedevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `type` | [Optional[models.AgentHandoffStartedEventType]](../models/agenthandoffstartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["agent.handoff.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedeventtype.md b/docs/models/agenthandoffstartedeventtype.md deleted file mode 100644 index 4ffaff15..00000000 --- a/docs/models/agenthandoffstartedeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffStartedEventType - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `AGENT_HANDOFF_STARTED` | agent.handoff.started | \ No newline at end of file diff --git a/docs/models/queryparamagentversion.md b/docs/models/agentsapiv1agentsgetagentversion.md similarity index 79% rename from docs/models/queryparamagentversion.md rename to docs/models/agentsapiv1agentsgetagentversion.md index 3eb5ef18..7fb9f2d5 100644 --- a/docs/models/queryparamagentversion.md +++ b/docs/models/agentsapiv1agentsgetagentversion.md @@ -1,4 +1,4 @@ -# QueryParamAgentVersion +# AgentsAPIV1AgentsGetAgentVersion ## Supported Types diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md index c71d4419..ceffe009 100644 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.QueryParamAgentVersion]](../models/queryparamagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responsebody.md b/docs/models/agentsapiv1conversationslistresponse.md similarity index 84% rename from docs/models/responsebody.md rename to docs/models/agentsapiv1conversationslistresponse.md index 8a218517..b233ee20 100644 --- a/docs/models/responsebody.md +++ b/docs/models/agentsapiv1conversationslistresponse.md @@ -1,4 +1,4 @@ -# ResponseBody +# AgentsAPIV1ConversationsListResponse ## Supported Types diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 2a0c4144..d87dc7da 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -10,7 +10,7 @@ | `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.AgentsCompletionRequestMessage](../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | diff --git a/docs/models/instructrequestinputsmessages.md b/docs/models/agentscompletionrequestmessage.md similarity index 92% rename from docs/models/instructrequestinputsmessages.md rename to docs/models/agentscompletionrequestmessage.md index 237e131f..957703b5 100644 --- a/docs/models/instructrequestinputsmessages.md +++ b/docs/models/agentscompletionrequestmessage.md @@ -1,4 +1,4 @@ -# InstructRequestInputsMessages +# AgentsCompletionRequestMessage ## Supported Types diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index b2ccd4e8..dd1804a1 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -10,7 +10,7 @@ | `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.AgentsCompletionStreamRequestMessage](../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | diff --git a/docs/models/chatcompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessage.md similarity index 90% rename from docs/models/chatcompletionstreamrequestmessages.md rename to docs/models/agentscompletionstreamrequestmessage.md index 47990611..6ccf4244 100644 --- a/docs/models/chatcompletionstreamrequestmessages.md +++ b/docs/models/agentscompletionstreamrequestmessage.md @@ -1,4 +1,4 @@ -# ChatCompletionStreamRequestMessages +# AgentsCompletionStreamRequestMessage ## Supported Types diff --git a/docs/models/agentscompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessages.md deleted file mode 100644 index 1bc736af..00000000 --- a/docs/models/agentscompletionstreamrequestmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# AgentsCompletionStreamRequestMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/tools.md b/docs/models/agenttool.md similarity index 98% rename from docs/models/tools.md rename to docs/models/agenttool.md index f308d732..022f7e10 100644 --- a/docs/models/tools.md +++ b/docs/models/agenttool.md @@ -1,4 +1,4 @@ -# Tools +# AgentTool ## Supported Types diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md index 641d1e40..b276e199 100644 --- a/docs/models/agentupdaterequest.md +++ b/docs/models/agentupdaterequest.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentUpdateRequestTools](../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentUpdateRequestTool](../models/agentupdaterequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversationtools.md b/docs/models/agentupdaterequesttool.md similarity index 96% rename from docs/models/modelconversationtools.md rename to docs/models/agentupdaterequesttool.md index 5cc97437..ce553126 100644 --- a/docs/models/modelconversationtools.md +++ b/docs/models/agentupdaterequesttool.md @@ -1,4 +1,4 @@ -# ModelConversationTools +# AgentUpdateRequestTool ## Supported Types diff --git a/docs/models/audiochunk.md b/docs/models/audiochunk.md index c443e7ad..8a04af04 100644 --- a/docs/models/audiochunk.md +++ b/docs/models/audiochunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -| `input_audio` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.AudioChunkType]](../models/audiochunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------ | ------------------------ | ------------------------ | ------------------------ | +| `input_audio` | *str* | :heavy_check_mark: | N/A | +| `type` | *Literal["input_audio"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/audiochunktype.md b/docs/models/audiochunktype.md deleted file mode 100644 index 46ebf372..00000000 --- a/docs/models/audiochunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AudioChunkType - - -## Values - -| Name | Value | -| ------------- | ------------- | -| `INPUT_AUDIO` | 
input_audio | \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 58ad5e25..0f42504f 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -17,4 +17,4 @@ | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.BaseModelCardType]](../models/basemodelcardtype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Literal["base"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/basemodelcardtype.md b/docs/models/basemodelcardtype.md deleted file mode 100644 index 4a40ce76..00000000 --- a/docs/models/basemodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# BaseModelCardType - - -## Values - -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file diff --git a/docs/models/chatcompletionchoice.md b/docs/models/chatcompletionchoice.md index d77d286e..deaa0ea0 100644 --- a/docs/models/chatcompletionchoice.md +++ b/docs/models/chatcompletionchoice.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `index` | *int* | :heavy_check_mark: | N/A | 0 | -| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | -| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file diff --git a/docs/models/finishreason.md b/docs/models/chatcompletionchoicefinishreason.md similarity index 88% rename from docs/models/finishreason.md rename to docs/models/chatcompletionchoicefinishreason.md index 2af53f6e..b2f15ecb 100644 --- a/docs/models/finishreason.md +++ b/docs/models/chatcompletionchoicefinishreason.md @@ -1,4 +1,4 @@ -# FinishReason +# ChatCompletionChoiceFinishReason ## Values diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index 109fa7b1..f3abeeff 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -10,10 +10,10 @@ | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model 
considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/docs/models/one.md b/docs/models/chatcompletionrequestmessage.md similarity index 92% rename from docs/models/one.md rename to docs/models/chatcompletionrequestmessage.md index 3de496a6..91e9e062 100644 --- a/docs/models/one.md +++ b/docs/models/chatcompletionrequestmessage.md @@ -1,4 +1,4 @@ -# One +# ChatCompletionRequestMessage ## Supported Types diff --git a/docs/models/stop.md b/docs/models/chatcompletionrequeststop.md similarity index 90% rename from docs/models/stop.md rename to docs/models/chatcompletionrequeststop.md index ba40ca83..749296d4 100644 --- a/docs/models/stop.md +++ b/docs/models/chatcompletionrequeststop.md @@ -1,4 +1,4 @@ -# Stop +# ChatCompletionRequestStop Stop generation if this token is detected. Or if one of these tokens is detected when providing an array diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 7d5fb411..42792d39 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -13,7 +13,7 @@ | `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/docs/models/agentscompletionrequestmessages.md b/docs/models/chatcompletionstreamrequestmessage.md similarity index 91% rename from docs/models/agentscompletionrequestmessages.md rename to docs/models/chatcompletionstreamrequestmessage.md index d6a1e691..2e4e93ac 100644 --- a/docs/models/agentscompletionrequestmessages.md +++ b/docs/models/chatcompletionstreamrequestmessage.md @@ -1,4 +1,4 @@ -# AgentsCompletionRequestMessages +# ChatCompletionStreamRequestMessage ## Supported Types diff --git a/docs/models/chatmoderationrequest.md b/docs/models/chatmoderationrequest.md index 69b6c1dc..f252482d 100644 --- a/docs/models/chatmoderationrequest.md +++ b/docs/models/chatmoderationrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `inputs` | [models.ChatModerationRequestInputs](../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `inputs` | [models.ChatModerationRequestInputs3](../models/chatmoderationrequestinputs3.md) | :heavy_check_mark: | Chat to classify | +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatmoderationrequestinputs.md b/docs/models/chatmoderationrequestinputs.md deleted file mode 100644 index cf775d60..00000000 --- a/docs/models/chatmoderationrequestinputs.md +++ /dev/null @@ -1,19 +0,0 @@ -# ChatModerationRequestInputs - -Chat to classify - - -## Supported Types - -### `List[models.One]` - -```python -value: List[models.One] = /* values here */ -``` - -### `List[List[models.Two]]` - -```python -value: List[List[models.Two]] = /* values here */ -``` - diff --git a/docs/models/instructrequestmessages.md b/docs/models/chatmoderationrequestinputs1.md similarity index 92% rename from docs/models/instructrequestmessages.md rename to docs/models/chatmoderationrequestinputs1.md index 9c866a7d..e15b8a84 100644 --- a/docs/models/instructrequestmessages.md +++ b/docs/models/chatmoderationrequestinputs1.md @@ -1,4 +1,4 @@ -# 
InstructRequestMessages +# ChatModerationRequestInputs1 ## Supported Types diff --git a/docs/models/chatmoderationrequestinputs2.md b/docs/models/chatmoderationrequestinputs2.md new file mode 100644 index 00000000..f40a4ebe --- /dev/null +++ b/docs/models/chatmoderationrequestinputs2.md @@ -0,0 +1,29 @@ +# ChatModerationRequestInputs2 + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatmoderationrequestinputs3.md b/docs/models/chatmoderationrequestinputs3.md new file mode 100644 index 00000000..ff1c6ea3 --- /dev/null +++ b/docs/models/chatmoderationrequestinputs3.md @@ -0,0 +1,19 @@ +# ChatModerationRequestInputs3 + +Chat to classify + + +## Supported Types + +### `List[models.ChatModerationRequestInputs1]` + +```python +value: List[models.ChatModerationRequestInputs1] = /* values here */ +``` + +### `List[List[models.ChatModerationRequestInputs2]]` + +```python +value: List[List[models.ChatModerationRequestInputs2]] = /* values here */ +``` + diff --git a/docs/models/classifierdetailedjobout.md b/docs/models/classifierdetailedjobout.md index ccc88f89..15f70aeb 100644 --- a/docs/models/classifierdetailedjobout.md +++ b/docs/models/classifierdetailedjobout.md @@ -16,10 +16,10 @@ | `object` | [Optional[models.ClassifierDetailedJobOutObject]](../models/classifierdetailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.ClassifierDetailedJobOutIntegrations](../models/classifierdetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.ClassifierDetailedJobOutIntegration](../models/classifierdetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.ClassifierDetailedJobOutJobType]](../models/classifierdetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| | `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | diff --git a/docs/models/completiondetailedjoboutintegrations.md b/docs/models/classifierdetailedjoboutintegration.md similarity index 76% rename from docs/models/completiondetailedjoboutintegrations.md rename to docs/models/classifierdetailedjoboutintegration.md index af6bbcc5..9dfa6e8a 100644 --- a/docs/models/completiondetailedjoboutintegrations.md +++ b/docs/models/classifierdetailedjoboutintegration.md @@ -1,4 +1,4 @@ -# CompletionDetailedJobOutIntegrations +# ClassifierDetailedJobOutIntegration ## Supported Types diff --git a/docs/models/classifierdetailedjoboutjobtype.md b/docs/models/classifierdetailedjoboutjobtype.md deleted file mode 100644 index 0d1c6573..00000000 --- a/docs/models/classifierdetailedjoboutjobtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierDetailedJobOutJobType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md index dd9e8bf9..d7bcd3ca 100644 --- a/docs/models/classifierftmodelout.md +++ b/docs/models/classifierftmodelout.md @@ -3,21 +3,21 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | -| `model_type` | [Optional[models.ClassifierFTModelOutModelType]](../models/classifierftmodeloutmodeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | 
:heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | +| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierftmodeloutmodeltype.md b/docs/models/classifierftmodeloutmodeltype.md deleted file mode 100644 index e1e7e465..00000000 --- a/docs/models/classifierftmodeloutmodeltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierFTModelOutModelType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierjobout.md b/docs/models/classifierjobout.md index aa1d3ca9..f8259cab 100644 --- a/docs/models/classifierjobout.md +++ b/docs/models/classifierjobout.md @@ -16,8 +16,8 @@ | `object` | [Optional[models.ClassifierJobOutObject]](../models/classifierjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.ClassifierJobOutIntegrations](../models/classifierjoboutintegrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.ClassifierJobOutIntegration](../models/classifierjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.ClassifierJobOutJobType]](../models/classifierjoboutjobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | +| `job_type` | *Literal["classifier"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| | `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/integrations.md b/docs/models/classifierjoboutintegration.md similarity index 80% rename from docs/models/integrations.md rename to docs/models/classifierjoboutintegration.md index 35214d63..33af8a70 100644 --- a/docs/models/integrations.md +++ b/docs/models/classifierjoboutintegration.md @@ -1,4 +1,4 @@ -# Integrations +# ClassifierJobOutIntegration ## Supported Types diff --git a/docs/models/classifierjoboutjobtype.md b/docs/models/classifierjoboutjobtype.md deleted file mode 100644 index 7f5236fa..00000000 --- a/docs/models/classifierjoboutjobtype.md +++ /dev/null @@ -1,10 +0,0 @@ -# ClassifierJobOutJobType - -The type of job (`FT` for fine-tuning). - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/codeinterpretertool.md b/docs/models/codeinterpretertool.md index d5ad789e..544cda93 100644 --- a/docs/models/codeinterpretertool.md +++ b/docs/models/codeinterpretertool.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.CodeInterpreterToolType]](../models/codeinterpretertooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `type` | *Literal["code_interpreter"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/codeinterpretertooltype.md b/docs/models/codeinterpretertooltype.md deleted file mode 100644 index f704b65e..00000000 --- a/docs/models/codeinterpretertooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# CodeInterpreterToolType - - -## Values - -| Name | Value | -| ------------------ | ------------------ | -| `CODE_INTERPRETER` | code_interpreter | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completiondetailedjobout.md index 84613080..725ebcde 100644 --- a/docs/models/completiondetailedjobout.md +++ b/docs/models/completiondetailedjobout.md @@ -16,11 +16,11 @@ | `object` | [Optional[models.CompletionDetailedJobOutObject]](../models/completiondetailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.CompletionDetailedJobOutIntegrations](../models/completiondetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.CompletionDetailedJobOutIntegration](../models/completiondetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.CompletionDetailedJobOutJobType]](../models/completiondetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | +| 
`job_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.CompletionDetailedJobOutRepositories](../models/completiondetailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.CompletionDetailedJobOutRepository](../models/completiondetailedjoboutrepository.md)] | :heavy_minus_sign: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | | `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutintegrations.md b/docs/models/completiondetailedjoboutintegration.md similarity index 76% rename from docs/models/classifierdetailedjoboutintegrations.md rename to docs/models/completiondetailedjoboutintegration.md index 5a09465e..9e526053 100644 --- a/docs/models/classifierdetailedjoboutintegrations.md +++ b/docs/models/completiondetailedjoboutintegration.md @@ -1,4 +1,4 @@ -# ClassifierDetailedJobOutIntegrations +# CompletionDetailedJobOutIntegration ## Supported Types diff --git a/docs/models/completiondetailedjoboutjobtype.md b/docs/models/completiondetailedjoboutjobtype.md deleted file mode 100644 index fb24db0c..00000000 --- a/docs/models/completiondetailedjoboutjobtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionDetailedJobOutJobType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutrepositories.md b/docs/models/completiondetailedjoboutrepository.md similarity index 76% rename from docs/models/completiondetailedjoboutrepositories.md rename to docs/models/completiondetailedjoboutrepository.md index 4f9727c3..92a7b75c 100644 --- a/docs/models/completiondetailedjoboutrepositories.md +++ b/docs/models/completiondetailedjoboutrepository.md @@ -1,4 +1,4 @@ -# CompletionDetailedJobOutRepositories +# CompletionDetailedJobOutRepository ## Supported Types diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md index cd085825..9ebfa83e 100644 --- a/docs/models/completionftmodelout.md +++ b/docs/models/completionftmodelout.md @@ -19,4 +19,4 @@ | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | -| `model_type` | [Optional[models.ModelType]](../models/modeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionjobout.md b/docs/models/completionjobout.md index cb471746..84be452f 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionjobout.md @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `status` | [models.Status](../models/status.md) | :heavy_check_mark: | The current status of the fine-tuning job. 
| +| `status` | [models.CompletionJobOutStatus](../models/completionjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | @@ -16,9 +16,9 @@ | `object` | [Optional[models.CompletionJobOutObject]](../models/completionjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.Integrations](../models/integrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.CompletionJobOutIntegration](../models/completionjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.JobType]](../models/jobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | +| `job_type` | *Literal["completion"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `repositories` | List[[models.CompletionJobOutRepository](../models/completionjoboutrepository.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierjoboutintegrations.md b/docs/models/completionjoboutintegration.md similarity index 80% rename from docs/models/classifierjoboutintegrations.md rename to docs/models/completionjoboutintegration.md index d938d0b9..6474747b 100644 --- a/docs/models/classifierjoboutintegrations.md +++ b/docs/models/completionjoboutintegration.md @@ -1,4 +1,4 @@ -# ClassifierJobOutIntegrations +# CompletionJobOutIntegration ## Supported Types diff --git a/docs/models/repositories.md b/docs/models/completionjoboutrepository.md similarity index 81% rename from docs/models/repositories.md rename to docs/models/completionjoboutrepository.md index 02274e3d..52f65558 100644 --- a/docs/models/repositories.md +++ b/docs/models/completionjoboutrepository.md @@ -1,4 +1,4 @@ -# Repositories +# CompletionJobOutRepository ## Supported Types diff --git a/docs/models/status.md b/docs/models/completionjoboutstatus.md similarity index 96% rename from docs/models/status.md rename to docs/models/completionjoboutstatus.md index 5e22eb73..91754945 100644 --- a/docs/models/status.md +++ b/docs/models/completionjoboutstatus.md @@ -1,4 +1,4 @@ -# Status +# CompletionJobOutStatus The current status of the fine-tuning job. diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md index ebb1d513..c8baad0b 100644 --- a/docs/models/conversationhistory.md +++ b/docs/models/conversationhistory.md @@ -9,4 +9,4 @@ Retrieve all entries in a conversation. 
| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | | `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | | `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `entries` | List[[models.Entries](../models/entries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| `entries` | List[[models.Entry](../models/entry.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md index 2b4ff8ef..bd7823a8 100644 --- a/docs/models/conversationrequest.md +++ b/docs/models/conversationrequest.md @@ -3,18 +3,18 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../models/handoffexecution.md) | :heavy_minus_sign: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| -| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | [OptionalNullable[models.AgentVersion]](../models/agentversion.md) | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationRequestHandoffExecution]](../models/conversationrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationRequestTool](../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRequestAgentVersion]](../models/conversationrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentversion.md b/docs/models/conversationrequestagentversion.md similarity index 80% rename from docs/models/agentversion.md rename to docs/models/conversationrequestagentversion.md index fd4b6a3e..9f251821 100644 --- a/docs/models/agentversion.md +++ b/docs/models/conversationrequestagentversion.md @@ -1,4 +1,4 @@ -# AgentVersion +# ConversationRequestAgentVersion ## Supported Types diff --git a/docs/models/handoffexecution.md b/docs/models/conversationrequesthandoffexecution.md similarity index 73% rename from docs/models/handoffexecution.md rename to docs/models/conversationrequesthandoffexecution.md index 61e7dade..e7314f7e 100644 --- a/docs/models/handoffexecution.md +++ b/docs/models/conversationrequesthandoffexecution.md @@ -1,4 +1,4 @@ -# HandoffExecution +# ConversationRequestHandoffExecution ## Values diff --git a/docs/models/agentupdaterequesttools.md b/docs/models/conversationrequesttool.md similarity index 95% rename from docs/models/agentupdaterequesttools.md rename to docs/models/conversationrequesttool.md index 1752ee68..2e4e8d01 100644 --- a/docs/models/agentupdaterequesttools.md +++ 
b/docs/models/conversationrequesttool.md @@ -1,4 +1,4 @@ -# AgentUpdateRequestTools +# ConversationRequestTool ## Supported Types diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md index 38cdadd0..e3182128 100644 --- a/docs/models/conversationresponse.md +++ b/docs/models/conversationresponse.md @@ -9,5 +9,5 @@ The response after appending new entries to the conversation. | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | | `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | | `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `outputs` | List[[models.Outputs](../models/outputs.md)] | :heavy_check_mark: | N/A | +| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | | `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md index 299346f8..8b74f9e7 100644 --- a/docs/models/conversationstreamrequest.md +++ b/docs/models/conversationstreamrequest.md @@ -10,7 +10,7 @@ | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.ConversationStreamRequestTool](../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/agentcreationrequesttools.md b/docs/models/conversationstreamrequesttool.md similarity index 95% rename from docs/models/agentcreationrequesttools.md rename to docs/models/conversationstreamrequesttool.md index c2525850..0f75f82b 100644 --- a/docs/models/agentcreationrequesttools.md +++ b/docs/models/conversationstreamrequesttool.md @@ -1,4 +1,4 @@ -# AgentCreationRequestTools +# ConversationStreamRequestTool ## Supported Types diff --git a/docs/models/conversationstreamrequesttools.md b/docs/models/conversationstreamrequesttools.md deleted file mode 100644 index 700c8448..00000000 --- a/docs/models/conversationstreamrequesttools.md +++ /dev/null @@ -1,41 +0,0 @@ -# ConversationStreamRequestTools - - -## Supported Types - -### `models.CodeInterpreterTool` - -```python -value: models.CodeInterpreterTool = /* values here */ -``` - -### `models.DocumentLibraryTool` - -```python -value: models.DocumentLibraryTool = /* values here */ -``` - -### `models.FunctionTool` - -```python -value: models.FunctionTool = /* values here */ -``` - -### `models.ImageGenerationTool` - -```python -value: models.ImageGenerationTool = /* values here */ -``` - -### `models.WebSearchTool` - -```python -value: models.WebSearchTool = /* values here */ -``` - -### `models.WebSearchPremiumTool` - -```python -value: models.WebSearchPremiumTool = /* values here */ -``` - diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md index 61deabbf..e0ee575f 100644 --- a/docs/models/deltamessage.md +++ b/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/content.md b/docs/models/deltamessagecontent.md similarity index 89% rename from docs/models/content.md rename to docs/models/deltamessagecontent.md index a833dc2c..8142772d 100644 --- a/docs/models/content.md +++ b/docs/models/deltamessagecontent.md @@ -1,4 +1,4 @@ -# Content +# DeltaMessageContent ## Supported Types diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md index 
82315f32..1695bad4 100644 --- a/docs/models/documentlibrarytool.md +++ b/docs/models/documentlibrarytool.md @@ -3,7 +3,7 @@ ## Fields -| Field                                                                            | Type                                                                             | Required                                                                         | Description                                                                      | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type`                                                                           | [Optional[models.DocumentLibraryToolType]](../models/documentlibrarytooltype.md) | :heavy_minus_sign:                                                               | N/A                                                                              | -| `library_ids`                                                                    | List[*str*]                                                                      | :heavy_check_mark:                                                               | Ids of the library in which to search.                                           | \ No newline at end of file +| Field                                  | Type                                   | Required                               | Description                            | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `type`                                 | *Literal["document_library"]*          | :heavy_check_mark:                     | N/A                                    | +| `library_ids`                          | List[*str*]                            | :heavy_check_mark:                     | IDs of the libraries in which to search. | \ No newline at end of file diff --git a/docs/models/documentlibrarytooltype.md b/docs/models/documentlibrarytooltype.md deleted file mode 100644 index ebd420f6..00000000 --- a/docs/models/documentlibrarytooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentLibraryToolType - - -## Values - -| Name               | Value              | -| ------------------ | ------------------ | -| `DOCUMENT_LIBRARY` | document_library   | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1documentupload.md b/docs/models/documentupload.md similarity index 98% rename from docs/models/librariesdocumentsuploadv1documentupload.md rename to docs/models/documentupload.md index a0ba95da..4e58a475 100644 --- a/docs/models/librariesdocumentsuploadv1documentupload.md +++ b/docs/models/documentupload.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsUploadV1DocumentUpload +# DocumentUpload ## Fields diff --git a/docs/models/entries.md b/docs/models/entry.md similarity index 98% rename from docs/models/entries.md rename to docs/models/entry.md index 8e5a20d0..d934b677 100644 --- a/docs/models/entries.md +++ b/docs/models/entry.md @@ -1,4 +1,4 @@ -# Entries +# Entry ## Supported Types diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index 35032775..409f0526 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -19,7 +19,7 @@ Extra fields for fine-tuned models.
| `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["fine-tuned"]* | :heavy_check_mark: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md deleted file mode 100644 index 0b38470b..00000000 --- a/docs/models/ftmodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FTModelCardType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md index c25679a5..f4062060 100644 --- a/docs/models/functioncallevent.md +++ b/docs/models/functioncallevent.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `type` | [Optional[models.FunctionCallEventType]](../models/functioncalleventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["function.call.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functioncalleventtype.md b/docs/models/functioncalleventtype.md deleted file mode 100644 index 8cf3f038..00000000 --- a/docs/models/functioncalleventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionCallEventType - - -## Values - -| Name | Value | -| --------------------- | --------------------- | -| `FUNCTION_CALL_DELTA` | function.call.delta | \ No newline at end of file diff --git a/docs/models/functiontool.md b/docs/models/functiontool.md index 8c424593..0226b704 100644 --- 
a/docs/models/functiontool.md +++ b/docs/models/functiontool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | -| `type` | [Optional[models.FunctionToolType]](../models/functiontooltype.md) | :heavy_minus_sign: | N/A | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `type` | *Literal["function"]* | :heavy_check_mark: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functiontooltype.md b/docs/models/functiontooltype.md deleted file mode 100644 index 9c095625..00000000 --- a/docs/models/functiontooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionToolType - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/githubrepositoryin.md b/docs/models/githubrepositoryin.md index 1584152b..241cf584 100644 --- a/docs/models/githubrepositoryin.md +++ b/docs/models/githubrepositoryin.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.GithubRepositoryInType]](../models/githubrepositoryintype.md) | :heavy_minus_sign: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `owner` | *str* | :heavy_check_mark: | N/A | -| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `token` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["github"]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `token` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryintype.md b/docs/models/githubrepositoryintype.md deleted file mode 100644 index 63da967c..00000000 --- a/docs/models/githubrepositoryintype.md +++ /dev/null @@ -1,8 +0,0 @@ -# GithubRepositoryInType - - -## Values - -| Name | Value | -| -------- | -------- | -| `GITHUB` | github | \ No newline at end of file diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepositoryout.md index 03f0b266..fe38393a 100644 --- a/docs/models/githubrepositoryout.md +++ b/docs/models/githubrepositoryout.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.GithubRepositoryOutType]](../models/githubrepositoryouttype.md) | :heavy_minus_sign: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `owner` | *str* | :heavy_check_mark: | N/A | -| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `commit_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["github"]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `commit_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryouttype.md b/docs/models/githubrepositoryouttype.md deleted file mode 100644 index 46c3eefd..00000000 --- a/docs/models/githubrepositoryouttype.md +++ /dev/null @@ -1,8 +0,0 @@ -# GithubRepositoryOutType - - -## Values - -| Name | Value | -| -------- | -------- | -| `GITHUB` | github | \ No newline at end of file diff --git a/docs/models/imagegenerationtool.md b/docs/models/imagegenerationtool.md index b8fc9cf4..0c8de72c 100644 --- a/docs/models/imagegenerationtool.md +++ b/docs/models/imagegenerationtool.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.ImageGenerationToolType]](../models/imagegenerationtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `type` | *Literal["image_generation"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imagegenerationtooltype.md b/docs/models/imagegenerationtooltype.md deleted file mode 100644 index 29681b58..00000000 --- a/docs/models/imagegenerationtooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageGenerationToolType - - -## Values - -| Name | Value | -| ------------------ | ------------------ | -| `IMAGE_GENERATION` | image_generation | \ No newline at end of file diff --git a/docs/models/imageurlchunk.md b/docs/models/imageurlchunk.md index f1b926ef..43078c78 100644 --- a/docs/models/imageurlchunk.md +++ b/docs/models/imageurlchunk.md @@ -7,5 +7,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | | `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunkimageurl.md b/docs/models/imageurlunion.md similarity index 86% rename from docs/models/imageurlchunkimageurl.md rename to docs/models/imageurlunion.md index 76738908..db97130f 100644 --- a/docs/models/imageurlchunkimageurl.md +++ b/docs/models/imageurlunion.md @@ -1,4 +1,4 @@ -# ImageURLChunkImageURL +# ImageURLUnion ## Supported Types diff --git a/docs/models/messages.md b/docs/models/inputsmessage.md similarity index 96% rename from docs/models/messages.md rename to docs/models/inputsmessage.md index 1d394500..e3543fb4 100644 --- a/docs/models/messages.md +++ b/docs/models/inputsmessage.md @@ -1,4 +1,4 @@ -# Messages +# InputsMessage ## Supported Types diff --git a/docs/models/instructrequest.md b/docs/models/instructrequest.md index 9500cb58..5f0cdfff 100644 --- a/docs/models/instructrequest.md +++ b/docs/models/instructrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `messages` | List[[models.InstructRequestMessages](../models/instructrequestmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `messages` | List[[models.InstructRequestMessage](../models/instructrequestmessage.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestinputs.md b/docs/models/instructrequestinputs.md index 4caa028f..931ae5e4 100644 --- a/docs/models/instructrequestinputs.md +++ b/docs/models/instructrequestinputs.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `messages` | List[[models.InstructRequestInputsMessages](../models/instructrequestinputsmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `messages` | List[[models.InputsMessage](../models/inputsmessage.md)] | :heavy_check_mark: | N/A | \ No newline at end of 
file diff --git a/docs/models/two.md b/docs/models/instructrequestmessage.md similarity index 93% rename from docs/models/two.md rename to docs/models/instructrequestmessage.md index 59dc2be2..57ed27ab 100644 --- a/docs/models/two.md +++ b/docs/models/instructrequestmessage.md @@ -1,4 +1,4 @@ -# Two +# InstructRequestMessage ## Supported Types diff --git a/docs/models/jobin.md b/docs/models/jobin.md index b9651770..33e6ccc6 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -9,10 +9,10 @@ | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegrations](../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `integrations` | List[[models.JobInIntegration](../models/jobinintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | | `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.JobInRepositories](../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.JobInRepository](../models/jobinrepository.md)] | :heavy_minus_sign: | N/A | | `classifier_targets` | List[[models.ClassifierTargetIn](../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobinintegrations.md b/docs/models/jobinintegration.md similarity index 85% rename from docs/models/jobinintegrations.md rename to docs/models/jobinintegration.md index 91c10242..103820e7 100644 --- a/docs/models/jobinintegrations.md +++ b/docs/models/jobinintegration.md @@ -1,4 +1,4 @@ -# JobInIntegrations +# JobInIntegration ## Supported Types diff --git a/docs/models/jobinrepositories.md b/docs/models/jobinrepository.md similarity index 86% rename from docs/models/jobinrepositories.md rename to docs/models/jobinrepository.md index b94477af..e873ae63 100644 --- a/docs/models/jobinrepositories.md +++ b/docs/models/jobinrepository.md @@ -1,4 +1,4 @@ -# JobInRepositories +# JobInRepository ## Supported Types diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md index eeddc3cd..13191e90 100644 --- a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +++ b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -5,10 +5,10 @@ OK ## Supported Types -### `models.Response1` +### `models.Response` 
```python -value: models.Response1 = /* values here */ +value: models.Response = /* values here */ ``` ### `models.LegacyJobMetadataOut` diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md index 3dca3cd8..23c52c34 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.QueryParamStatus]](../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. 
| +| `created_after`                                                                                                                    | [date](https://docs.python.org/3/library/datetime.html#date-objects)                                                                 | :heavy_minus_sign:                                                                                                                     | The date/time to filter on. When set, the results for previous creation times are not displayed.                                      | +| `created_before`                                                                                                                   | [date](https://docs.python.org/3/library/datetime.html#date-objects)                                                                 | :heavy_minus_sign:                                                                                                                     | N/A                                                                                                                                    | +| `created_by_me`                                                                                                                    | *Optional[bool]*                                                                                                                       | :heavy_minus_sign:                                                                                                                     | When set, only return results for jobs created by the API caller. Other results are not displayed.                                    | +| `status`                                                                                                                           | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign:                                                                                                                     | The current job state to filter on. When set, the other results are not displayed.                                                    | +| `wandb_project`                                                                                                                    | *OptionalNullable[str]*                                                                                                                | :heavy_minus_sign:                                                                                                                     | The Weights and Biases project to filter on. When set, the other results are not displayed.                                           | +| `wandb_name`                                                                                                                       | *OptionalNullable[str]*                                                                                                                | :heavy_minus_sign:                                                                                                                     | The Weights and Biases run name to filter on. When set, the other results are not displayed.                                          | +| `suffix`                                                                                                                           | *OptionalNullable[str]*                                                                                                                | :heavy_minus_sign:                                                                                                                     | The model suffix to filter on. When set, the other results are not displayed.                                                         | \ No newline at end of file diff --git a/docs/models/queryparamstatus.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md similarity index 94% rename from docs/models/queryparamstatus.md rename to docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md index dcd20908..40d57686 100644 --- a/docs/models/queryparamstatus.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md @@ -1,4 +1,4 @@ -# QueryParamStatus +# JobsAPIRoutesFineTuningGetFineTuningJobsStatus The current job state to filter on. When set, the other results are not displayed. diff --git a/docs/models/jobtype.md b/docs/models/jobtype.md deleted file mode 100644 index 847c6622..00000000 --- a/docs/models/jobtype.md +++ /dev/null @@ -1,10 +0,0 @@ -# JobType - -The type of job (`FT` for fine-tuning).
- - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1request.md b/docs/models/librariesdocumentsuploadv1request.md index 7c91ca9b..172a6183 100644 --- a/docs/models/librariesdocumentsuploadv1request.md +++ b/docs/models/librariesdocumentsuploadv1request.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `request_body` | [models.LibrariesDocumentsUploadV1DocumentUpload](../models/librariesdocumentsuploadv1documentupload.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `request_body` | [models.DocumentUpload](../models/documentupload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index d55eb876..52183a32 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,13 +5,13 @@ Representation of an input message inside the conversation. 
## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `object` | [Optional[models.MessageInputEntryObject]](../models/messageinputentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/object.md b/docs/models/messageinputentryobject.md similarity index 75% rename from docs/models/object.md rename to docs/models/messageinputentryobject.md index 0122c0db..6bdd62e2 100644 --- a/docs/models/object.md +++ b/docs/models/messageinputentryobject.md @@ -1,4 +1,4 @@ -# Object +# MessageInputEntryObject ## Values diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md index 92c1c615..b0fa1a2d 100644 --- a/docs/models/messageoutputevent.md +++ b/docs/models/messageoutputevent.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | 
[Optional[models.MessageOutputEventType]](../models/messageoutputeventtype.md) | :heavy_minus_sign: | N/A | +| `type`                                                                 | *Literal["message.output.delta"]*                                      | :heavy_check_mark:                                                     | N/A                                                                    | | `created_at`                                                           | [date](https://docs.python.org/3/library/datetime.html#date-objects)  | :heavy_minus_sign:                                                     | N/A                                                                    | | `output_index`                                                         | *Optional[int]*                                                        | :heavy_minus_sign:                                                     | N/A                                                                    | | `id`                                                                   | *str*                                                                  | :heavy_check_mark:                                                     | N/A                                                                    | diff --git a/docs/models/messageoutputeventtype.md b/docs/models/messageoutputeventtype.md deleted file mode 100644 index 1f43fdcc..00000000 --- a/docs/models/messageoutputeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEventType - - -## Values - -| Name                   | Value                  | -| ---------------------- | ---------------------- | -| `MESSAGE_OUTPUT_DELTA` | message.output.delta   | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md index 1a03ef7d..813e1f3a 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -6,7 +6,7 @@ | Field                                                                              | Type                                                                               | Required                                                                           | Description                                                                        | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | | `instructions`                                                                     | *OptionalNullable[str]*                                                            | :heavy_minus_sign:                                                                 | Instruction prompt the model will follow during the conversation.                  | -| `tools`                                                                            | List[[models.ModelConversationTools](../models/modelconversationtools.md)]        | :heavy_minus_sign:                                                                 | List of tools which are available to the model during the conversation.            | +| `tools`                                                                            | List[[models.ModelConversationTool](../models/modelconversationtool.md)]          | :heavy_minus_sign:                                                                 | List of tools which are available to the model during the conversation.            | | `completion_args`                                                                  | [Optional[models.CompletionArgs]](../models/completionargs.md)                     | :heavy_minus_sign:                                                                 | White-listed arguments from the completion API                                      | | `name`                                                                             | *OptionalNullable[str]*                                                            | :heavy_minus_sign:                                                                 | Name given to the conversation.                                                     | | `description`                                                                      | *OptionalNullable[str]*                                                            | :heavy_minus_sign:                                                                 | Description of what the conversation is about.
| diff --git a/docs/models/agenttools.md b/docs/models/modelconversationtool.md similarity index 96% rename from docs/models/agenttools.md rename to docs/models/modelconversationtool.md index 15891f56..87235567 100644 --- a/docs/models/agenttools.md +++ b/docs/models/modelconversationtool.md @@ -1,4 +1,4 @@ -# AgentTools +# ModelConversationTool ## Supported Types diff --git a/docs/models/modellist.md b/docs/models/modellist.md index 760882c6..85b20be7 100644 --- a/docs/models/modellist.md +++ b/docs/models/modellist.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `data` | List[[models.Data](../models/data.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `data` | List[[models.ModelListData](../models/modellistdata.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/data.md b/docs/models/modellistdata.md similarity index 92% rename from docs/models/data.md rename to docs/models/modellistdata.md index 95dc8d28..b44e84a0 100644 --- a/docs/models/data.md +++ b/docs/models/modellistdata.md @@ -1,4 +1,4 @@ -# Data +# ModelListData ## Supported Types diff --git a/docs/models/modeltype.md b/docs/models/modeltype.md deleted file mode 100644 index a31c3ca0..00000000 --- a/docs/models/modeltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ModelType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/multipartbodyparams.md similarity index 99% rename from docs/models/filesapiroutesuploadfilemultipartbodyparams.md rename to docs/models/multipartbodyparams.md index a5dd1174..f14b9573 100644 --- a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md +++ b/docs/models/multipartbodyparams.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesUploadFileMultiPartBodyParams +# MultiPartBodyParams ## Fields diff --git a/docs/models/outputs.md b/docs/models/output.md similarity index 97% rename from docs/models/outputs.md rename to docs/models/output.md index 7756c627..d0ee0db9 100644 --- a/docs/models/outputs.md +++ b/docs/models/output.md @@ -1,4 +1,4 @@ -# Outputs +# Output ## Supported Types diff --git a/docs/models/realtimetranscriptionerrordetail.md b/docs/models/realtimetranscriptionerrordetail.md index 96420ada..5b34755d 100644 --- a/docs/models/realtimetranscriptionerrordetail.md +++ b/docs/models/realtimetranscriptionerrordetail.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | -| `message` | [models.Message](../models/message.md) | :heavy_check_mark: | Human-readable error message. | -| `code` | *int* | :heavy_check_mark: | Internal error code for debugging. 
| \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | +| `message` | [models.RealtimeTranscriptionErrorDetailMessage](../models/realtimetranscriptionerrordetailmessage.md) | :heavy_check_mark: | Human-readable error message. | +| `code` | *int* | :heavy_check_mark: | Internal error code for debugging. | \ No newline at end of file diff --git a/docs/models/message.md b/docs/models/realtimetranscriptionerrordetailmessage.md similarity index 81% rename from docs/models/message.md rename to docs/models/realtimetranscriptionerrordetailmessage.md index 752f04a8..da3764ef 100644 --- a/docs/models/message.md +++ b/docs/models/realtimetranscriptionerrordetailmessage.md @@ -1,4 +1,4 @@ -# Message +# RealtimeTranscriptionErrorDetailMessage Human-readable error message. diff --git a/docs/models/response1.md b/docs/models/response.md similarity index 94% rename from docs/models/response1.md rename to docs/models/response.md index 2e73fdbb..3512b7a8 100644 --- a/docs/models/response1.md +++ b/docs/models/response.md @@ -1,4 +1,4 @@ -# Response1 +# Response ## Supported Types diff --git a/docs/models/responsedoneevent.md b/docs/models/responsedoneevent.md index ec25bd6d..63d4cc06 100644 --- a/docs/models/responsedoneevent.md +++ b/docs/models/responsedoneevent.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseDoneEventType]](../models/responsedoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsedoneeventtype.md b/docs/models/responsedoneeventtype.md deleted file mode 100644 index 58f7f44d..00000000 --- a/docs/models/responsedoneeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseDoneEventType - - -## Values - -| Name | Value | -| ---------------------------- | ---------------------------- 
| -| `CONVERSATION_RESPONSE_DONE` | conversation.response.done | \ No newline at end of file diff --git a/docs/models/responseerrorevent.md b/docs/models/responseerrorevent.md index 2ea6a2e0..4309bdad 100644 --- a/docs/models/responseerrorevent.md +++ b/docs/models/responseerrorevent.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.ResponseErrorEventType]](../models/responseerroreventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `message` | *str* | :heavy_check_mark: | N/A | -| `code` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.error"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `message` | *str* | :heavy_check_mark: | N/A | +| `code` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responseerroreventtype.md b/docs/models/responseerroreventtype.md deleted file mode 100644 index 3b3fc303..00000000 --- a/docs/models/responseerroreventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseErrorEventType - - -## Values - -| Name | Value | -| ----------------------------- | ----------------------------- | -| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | \ No newline at end of file diff --git a/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md b/docs/models/responseretrievemodelv1modelsmodelidget.md similarity index 75% rename from docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md rename to docs/models/responseretrievemodelv1modelsmodelidget.md index 3ac96521..ffbc1473 100644 --- a/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md +++ b/docs/models/responseretrievemodelv1modelsmodelidget.md @@ -1,4 +1,4 @@ -# RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet +# ResponseRetrieveModelV1ModelsModelIDGet Successful Response diff --git a/docs/models/responsestartedevent.md b/docs/models/responsestartedevent.md index 481bd5bb..e2f421af 100644 --- a/docs/models/responsestartedevent.md +++ b/docs/models/responsestartedevent.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `type` | 
[Optional[models.ResponseStartedEventType]](../models/responsestartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsestartedeventtype.md b/docs/models/responsestartedeventtype.md deleted file mode 100644 index 2d9273bd..00000000 --- a/docs/models/responsestartedeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseStartedEventType - - -## Values - -| Name | Value | -| ------------------------------- | ------------------------------- | -| `CONVERSATION_RESPONSE_STARTED` | conversation.response.started | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md b/docs/models/responsev1conversationsget.md similarity index 81% rename from docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md rename to docs/models/responsev1conversationsget.md index 4bc836f3..844c5d61 100644 --- a/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md +++ b/docs/models/responsev1conversationsget.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsGetResponseV1ConversationsGet +# ResponseV1ConversationsGet Successful Response diff --git a/docs/models/role.md b/docs/models/role.md deleted file mode 100644 index affca78d..00000000 --- a/docs/models/role.md +++ /dev/null @@ -1,8 +0,0 @@ -# Role - - -## Values - -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md index 0dba71c0..dfb0cd0b 100644 --- a/docs/models/systemmessage.md +++ b/docs/models/systemmessage.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | | `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaevent.md b/docs/models/toolexecutiondeltaevent.md index 7bee6d83..7066f348 100644 --- a/docs/models/toolexecutiondeltaevent.md +++ b/docs/models/toolexecutiondeltaevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | 
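The hunks above replace optional enum wrappers (`MessageOutputEventType`, `ResponseDoneEventType`, `ResponseErrorEventType`, `ResponseStartedEventType`) with required string literals, so a type checker can discriminate an event union by its `type` tag alone. A minimal sketch of the pattern, using hypothetical stand-in classes rather than the generated models:

```python
from dataclasses import dataclass
from typing import Literal, Union

# Hypothetical stand-ins for the generated event models; the real SDK
# classes carry the same required Literal "type" discriminator.
@dataclass
class Started:
    type: Literal["conversation.response.started"]
    conversation_id: str

@dataclass
class Done:
    type: Literal["conversation.response.done"]
    usage: dict

Event = Union[Started, Done]

def handle(event: Event) -> str:
    # Comparing against the literal tag narrows the union for the checker.
    if event.type == "conversation.response.started":
        return f"started {event.conversation_id}"
    return f"done, usage={event.usage}"

print(handle(Started(type="conversation.response.started", conversation_id="conv_123")))
```

The generated pydantic models follow the same scheme, which is why the standalone `*EventType` enum docs below are deleted.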
diff --git a/docs/models/toolexecutiondeltaevent.md b/docs/models/toolexecutiondeltaevent.md
index 7bee6d83..7066f348 100644
--- a/docs/models/toolexecutiondeltaevent.md
+++ b/docs/models/toolexecutiondeltaevent.md
@@ -3,11 +3,11 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | [Optional[models.ToolExecutionDeltaEventType]](../models/toolexecutiondeltaeventtype.md) | :heavy_minus_sign: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A |
-| `id` | *str* | :heavy_check_mark: | N/A |
-| `name` | [models.ToolExecutionDeltaEventName](../models/toolexecutiondeltaeventname.md) | :heavy_check_mark: | N/A |
-| `arguments` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Literal["tool.execution.delta"]* | :heavy_check_mark: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `id` | *str* | :heavy_check_mark: | N/A |
+| `name` | [models.ToolExecutionDeltaEventName](../models/toolexecutiondeltaeventname.md) | :heavy_check_mark: | N/A |
+| `arguments` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/toolexecutiondeltaeventtype.md b/docs/models/toolexecutiondeltaeventtype.md
deleted file mode 100644
index a4a2f8cc..00000000
--- a/docs/models/toolexecutiondeltaeventtype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ToolExecutionDeltaEventType
-
-
-## Values
-
-| Name | Value |
-| ---------------------- | ---------------------- |
-| `TOOL_EXECUTION_DELTA` | tool.execution.delta |
\ No newline at end of file
diff --git a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md
index 5898ea5e..b2d81be3 100644
--- a/docs/models/toolexecutiondoneevent.md
+++ b/docs/models/toolexecutiondoneevent.md
@@ -3,11 +3,11 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | [Optional[models.ToolExecutionDoneEventType]](../models/toolexecutiondoneeventtype.md) | :heavy_minus_sign: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A |
-| `id` | *str* | :heavy_check_mark: | N/A |
-| `name` | [models.ToolExecutionDoneEventName](../models/toolexecutiondoneeventname.md) | :heavy_check_mark: | N/A |
-| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Literal["tool.execution.done"]* | :heavy_check_mark: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `id` | *str* | :heavy_check_mark: | N/A |
+| `name` | [models.ToolExecutionDoneEventName](../models/toolexecutiondoneeventname.md) | :heavy_check_mark: | N/A |
+| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/toolexecutiondoneeventtype.md b/docs/models/toolexecutiondoneeventtype.md
deleted file mode 100644
index 872624c1..00000000
--- a/docs/models/toolexecutiondoneeventtype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ToolExecutionDoneEventType
-
-
-## Values
-
-| Name | Value |
-| --------------------- | --------------------- |
-| `TOOL_EXECUTION_DONE` | tool.execution.done |
\ No newline at end of file
diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md
index 3678116d..adf88fb1 100644
--- a/docs/models/toolexecutionentry.md
+++ b/docs/models/toolexecutionentry.md
@@ -10,6 +10,6 @@
 | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
 | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
 | `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
-| `name` | [models.Name](../models/name.md) | :heavy_check_mark: | N/A |
+| `name` | [models.ToolExecutionEntryName](../models/toolexecutionentryname.md) | :heavy_check_mark: | N/A |
 | `arguments` | *str* | :heavy_check_mark: | N/A |
 | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/name.md b/docs/models/toolexecutionentryname.md
similarity index 87%
rename from docs/models/name.md
rename to docs/models/toolexecutionentryname.md
index 18b978a8..fb762a53 100644
--- a/docs/models/name.md
+++ b/docs/models/toolexecutionentryname.md
@@ -1,4 +1,4 @@
-# Name
+# ToolExecutionEntryName
 
 
 ## Supported Types
diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md
index de81312b..c41c7258 100644
--- a/docs/models/toolexecutionstartedevent.md
+++ b/docs/models/toolexecutionstartedevent.md
@@ -3,11 +3,11 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | [Optional[models.ToolExecutionStartedEventType]](../models/toolexecutionstartedeventtype.md) | :heavy_minus_sign: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A |
-| `id` | *str* | :heavy_check_mark: | N/A |
-| `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A |
-| `arguments` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Literal["tool.execution.started"]* | :heavy_check_mark: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `id` | *str* | :heavy_check_mark: | N/A |
+| `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A |
+| `arguments` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/toolexecutionstartedeventtype.md b/docs/models/toolexecutionstartedeventtype.md
deleted file mode 100644
index 56695d1f..00000000
--- a/docs/models/toolexecutionstartedeventtype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ToolExecutionStartedEventType
-
-
-## Values
-
-| Name | Value |
-| ------------------------ | ------------------------ |
-| `TOOL_EXECUTION_STARTED` | tool.execution.started |
\ No newline at end of file
diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md
index a54f4933..fa00d666 100644
--- a/docs/models/toolmessage.md
+++ b/docs/models/toolmessage.md
@@ -8,4 +8,4 @@
 | `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A |
 | `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
 | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/toolmessagerole.md b/docs/models/toolmessagerole.md
deleted file mode 100644
index c24e59c0..00000000
--- a/docs/models/toolmessagerole.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ToolMessageRole
-
-
-## Values
-
-| Name | Value |
-| ------ | ------ |
-| `TOOL` | tool |
\ No newline at end of file
diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md
index f620b96a..00a599ee 100644
--- a/docs/models/transcriptionsegmentchunk.md
+++ b/docs/models/transcriptionsegmentchunk.md
@@ -3,12 +3,12 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `text` | *str* | :heavy_check_mark: | N/A |
-| `start` | *float* | :heavy_check_mark: | N/A |
-| `end` | *float* | :heavy_check_mark: | N/A |
-| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A |
-| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A |
-| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `text` | *str* | :heavy_check_mark: | N/A |
+| `start` | *float* | :heavy_check_mark: | N/A |
+| `end` | *float* | :heavy_check_mark: | N/A |
+| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A |
+| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `type` | [Optional[models.TranscriptionSegmentChunkType]](../models/transcriptionsegmentchunktype.md) | :heavy_minus_sign: | N/A |
+| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/type.md b/docs/models/transcriptionsegmentchunktype.md
similarity index 84%
rename from docs/models/type.md
rename to docs/models/transcriptionsegmentchunktype.md
index d05ead75..2968fa26 100644
--- a/docs/models/type.md
+++ b/docs/models/transcriptionsegmentchunktype.md
@@ -1,4 +1,4 @@
-# Type
+# TranscriptionSegmentChunkType
 
 
 ## Values
diff --git a/docs/models/transcriptionstreamdone.md b/docs/models/transcriptionstreamdone.md
index 9ecf7d9c..bca69a2b 100644
--- a/docs/models/transcriptionstreamdone.md
+++ b/docs/models/transcriptionstreamdone.md
@@ -3,12 +3,12 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `model` | *str* | :heavy_check_mark: | N/A |
-| `text` | *str* | :heavy_check_mark: | N/A |
-| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A |
-| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A |
-| `type` | [Optional[models.TranscriptionStreamDoneType]](../models/transcriptionstreamdonetype.md) | :heavy_minus_sign: | N/A |
-| `language` | *Nullable[str]* | :heavy_check_mark: | N/A |
-| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `model` | *str* | :heavy_check_mark: | N/A |
+| `text` | *str* | :heavy_check_mark: | N/A |
+| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A |
+| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A |
+| `type` | *Literal["transcription.done"]* | :heavy_check_mark: | N/A |
+| `language` | *Nullable[str]* | :heavy_check_mark: | N/A |
+| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/transcriptionstreamdonetype.md b/docs/models/transcriptionstreamdonetype.md
deleted file mode 100644
index db092c4f..00000000
--- a/docs/models/transcriptionstreamdonetype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# TranscriptionStreamDoneType
-
-
-## Values
-
-| Name | Value |
-| -------------------- | -------------------- |
-| `TRANSCRIPTION_DONE` | transcription.done |
\ No newline at end of file
diff --git a/docs/models/transcriptionstreamlanguage.md b/docs/models/transcriptionstreamlanguage.md
index e16c8fdc..63fcfbc6 100644
--- a/docs/models/transcriptionstreamlanguage.md
+++ b/docs/models/transcriptionstreamlanguage.md
@@ -3,8 +3,8 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | [Optional[models.TranscriptionStreamLanguageType]](../models/transcriptionstreamlanguagetype.md) | :heavy_minus_sign: | N/A |
-| `audio_language` | *str* | :heavy_check_mark: | N/A |
-| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Literal["transcription.language"]* | :heavy_check_mark: | N/A |
+| `audio_language` | *str* | :heavy_check_mark: | N/A |
+| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/transcriptionstreamlanguagetype.md b/docs/models/transcriptionstreamlanguagetype.md
deleted file mode 100644
index e93521e1..00000000
--- a/docs/models/transcriptionstreamlanguagetype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# TranscriptionStreamLanguageType
-
-
-## Values
-
-| Name | Value |
-| ------------------------ | ------------------------ |
-| `TRANSCRIPTION_LANGUAGE` | transcription.language |
\ No newline at end of file
diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md
index 2ab32f97..e0143a39 100644
--- a/docs/models/transcriptionstreamsegmentdelta.md
+++ b/docs/models/transcriptionstreamsegmentdelta.md
@@ -3,11 +3,11 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `text` | *str* | :heavy_check_mark: | N/A |
-| `start` | *float* | :heavy_check_mark: | N/A |
-| `end` | *float* | :heavy_check_mark: | N/A |
-| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `type` | [Optional[models.TranscriptionStreamSegmentDeltaType]](../models/transcriptionstreamsegmentdeltatype.md) | :heavy_minus_sign: | N/A |
-| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `text` | *str* | :heavy_check_mark: | N/A |
+| `start` | *float* | :heavy_check_mark: | N/A |
+| `end` | *float* | :heavy_check_mark: | N/A |
+| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `type` | *Literal["transcription.segment"]* | :heavy_check_mark: | N/A |
+| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/transcriptionstreamsegmentdeltatype.md b/docs/models/transcriptionstreamsegmentdeltatype.md
deleted file mode 100644
index 03ff3e8b..00000000
--- a/docs/models/transcriptionstreamsegmentdeltatype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# TranscriptionStreamSegmentDeltaType
-
-
-## Values
-
-| Name | Value |
-| ----------------------- | ----------------------- |
-| `TRANSCRIPTION_SEGMENT` | transcription.segment |
\ No newline at end of file
diff --git a/docs/models/transcriptionstreamtextdelta.md b/docs/models/transcriptionstreamtextdelta.md
index adddfe18..a4062171 100644
--- a/docs/models/transcriptionstreamtextdelta.md
+++ b/docs/models/transcriptionstreamtextdelta.md
@@ -3,8 +3,8 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `text` | *str* | :heavy_check_mark: | N/A |
-| `type` | [Optional[models.TranscriptionStreamTextDeltaType]](../models/transcriptionstreamtextdeltatype.md) | :heavy_minus_sign: | N/A |
-| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `text` | *str* | :heavy_check_mark: | N/A |
+| `type` | *Literal["transcription.text.delta"]* | :heavy_check_mark: | N/A |
+| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/transcriptionstreamtextdeltatype.md b/docs/models/transcriptionstreamtextdeltatype.md
deleted file mode 100644
index b7c9d675..00000000
--- a/docs/models/transcriptionstreamtextdeltatype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# TranscriptionStreamTextDeltaType
-
-
-## Values
-
-| Name | Value |
-| -------------------------- | -------------------------- |
-| `TRANSCRIPTION_TEXT_DELTA` | transcription.text.delta |
\ No newline at end of file
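The transcription stream events above follow the same scheme: each event carries a required literal tag such as `transcription.text.delta`, `transcription.segment`, `transcription.language`, or `transcription.done`. A sketch of consuming such a stream by tag, using plain dicts as stand-ins for the generated models (field names mirror the tables above):

```python
# Minimal sketch: accumulate text from transcription stream events,
# dispatching on the literal "type" tag.
def collect_text(events):
    chunks = []
    for event in events:
        if event["type"] == "transcription.text.delta":
            chunks.append(event["text"])
        elif event["type"] == "transcription.language":
            print("detected language:", event["audio_language"])
        elif event["type"] == "transcription.done":
            return event["text"]  # the done event carries the full text
    return "".join(chunks)

print(collect_text([
    {"type": "transcription.text.delta", "text": "Bonjour"},
    {"type": "transcription.language", "audio_language": "fr"},
    {"type": "transcription.done", "text": "Bonjour", "language": "fr"},
]))
```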
diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md
index 63b01310..78ed066e 100644
--- a/docs/models/usermessage.md
+++ b/docs/models/usermessage.md
@@ -6,4 +6,4 @@
 | Field | Type | Required | Description |
 | --- | --- | --- | --- |
 | `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A |
-| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| `role` | *Literal["user"]* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/usermessagerole.md b/docs/models/usermessagerole.md
deleted file mode 100644
index 171124e4..00000000
--- a/docs/models/usermessagerole.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# UserMessageRole
-
-
-## Values
-
-| Name | Value |
-| ------ | ------ |
-| `USER` | user |
\ No newline at end of file
diff --git a/docs/models/wandbintegration.md b/docs/models/wandbintegration.md
index 199d2edd..c73952d9 100644
--- a/docs/models/wandbintegration.md
+++ b/docs/models/wandbintegration.md
@@ -5,7 +5,7 @@
 | Field | Type | Required | Description |
 | --- | --- | --- | --- |
-| `type` | [Optional[models.WandbIntegrationType]](../models/wandbintegrationtype.md) | :heavy_minus_sign: | N/A |
+| `type` | *Literal["wandb"]* | :heavy_check_mark: | N/A |
 | `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. |
 | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. |
 | `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. |
diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md
index cec02ed8..a6f65667 100644
--- a/docs/models/wandbintegrationout.md
+++ b/docs/models/wandbintegrationout.md
@@ -3,10 +3,10 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | [Optional[models.WandbIntegrationOutType]](../models/wandbintegrationouttype.md) | :heavy_minus_sign: | N/A |
-| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. |
-| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. |
-| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Literal["wandb"]* | :heavy_check_mark: | N/A |
+| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. |
+| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. |
+| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/wandbintegrationouttype.md b/docs/models/wandbintegrationouttype.md
deleted file mode 100644
index 5a7533c9..00000000
--- a/docs/models/wandbintegrationouttype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# WandbIntegrationOutType
-
-
-## Values
-
-| Name | Value |
-| ------- | ------- |
-| `WANDB` | wandb |
\ No newline at end of file
diff --git a/docs/models/wandbintegrationtype.md b/docs/models/wandbintegrationtype.md
deleted file mode 100644
index 4fdffe22..00000000
--- a/docs/models/wandbintegrationtype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# WandbIntegrationType
-
-
-## Values
-
-| Name | Value |
-| ------- | ------- |
-| `WANDB` | wandb |
\ No newline at end of file
diff --git a/docs/models/websearchpremiumtool.md b/docs/models/websearchpremiumtool.md
index 941fc2b8..07b8b926 100644
--- a/docs/models/websearchpremiumtool.md
+++ b/docs/models/websearchpremiumtool.md
@@ -3,6 +3,6 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | [Optional[models.WebSearchPremiumToolType]](../models/websearchpremiumtooltype.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Literal["web_search_premium"]* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/websearchpremiumtooltype.md b/docs/models/websearchpremiumtooltype.md
deleted file mode 100644
index 348bfe85..00000000
--- a/docs/models/websearchpremiumtooltype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# WebSearchPremiumToolType
-
-
-## Values
-
-| Name | Value |
-| -------------------- | -------------------- |
-| `WEB_SEARCH_PREMIUM` | web_search_premium |
\ No newline at end of file
diff --git a/docs/models/websearchtool.md b/docs/models/websearchtool.md
index c8d708bd..da5e7b7b 100644
--- a/docs/models/websearchtool.md
+++ b/docs/models/websearchtool.md
@@ -3,6 +3,6 @@
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | [Optional[models.WebSearchToolType]](../models/websearchtooltype.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Literal["web_search"]* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/websearchtooltype.md b/docs/models/websearchtooltype.md
deleted file mode 100644
index 57b6acbb..00000000
--- a/docs/models/websearchtooltype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# WebSearchToolType
-
-
-## Values
-
-| Name | Value |
-| ------------ | ------------ |
-| `WEB_SEARCH` | web_search |
\ No newline at end of file
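With the enum wrappers gone, the literal strings documented in these tables are passed directly: `system`, `user`, and `tool` for message roles, `web_search` and `web_search_premium` for tool types. A sketch with plain dicts, which the generated pydantic models also accept; whether a given tool is enabled for an account is an assumption:

```python
# Plain-dict payloads using the literal tags from the tables above.
# An unlisted string (e.g. "assistant" where only "user" is allowed)
# now fails validation instead of silently passing as None.
messages = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Who is the best French painter?"},
]

# Tool entries are discriminated the same way.
tools = [
    {"type": "web_search"},          # WebSearchTool
    {"type": "web_search_premium"},  # WebSearchPremiumTool
]
```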
diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md
index 75efc492..cd3ec4c6 100644
--- a/docs/sdks/agents/README.md
+++ b/docs/sdks/agents/README.md
@@ -43,7 +43,7 @@ with Mistral(
 | Parameter | Type | Required | Description | Example |
 | --- | --- | --- | --- | --- |
-| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] |
+| `messages` | List[[models.AgentsCompletionRequestMessage](../../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] |
 | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | |
 | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
 | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | |
@@ -108,7 +108,7 @@ with Mistral(
 | Parameter | Type | Required | Description | Example |
 | --- | --- | --- | --- | --- |
-| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] |
+| `messages` | List[[models.AgentsCompletionStreamRequestMessage](../../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] |
 | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | |
 | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | |
 | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/batchjobs/README.md
similarity index 100%
rename from docs/sdks/mistraljobs/README.md
rename to docs/sdks/batchjobs/README.md
diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/betaagents/README.md
similarity index 78%
rename from docs/sdks/mistralagents/README.md
rename to docs/sdks/betaagents/README.md
index fe0f6e35..8d23b875 100644
--- a/docs/sdks/mistralagents/README.md
+++ b/docs/sdks/betaagents/README.md
@@ -46,17 +46,17 @@ with Mistral(
 ### Parameters
 
-| Parameter | Type | Required | Description |
-| --- | --- | --- | --- |
-| `model` | *str* | :heavy_check_mark: | N/A |
-| `name` | *str* | :heavy_check_mark: | N/A |
-| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
-| `tools` | List[[models.AgentCreationRequestTools](../../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
-| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
-| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A |
-| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
-| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+| Parameter | Type | Required | Description |
+| --- | --- | --- | --- |
+| `model` | *str* | :heavy_check_mark: | N/A |
+| `name` | *str* | :heavy_check_mark: | N/A |
+| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
+| `tools` | List[[models.AgentCreationRequestTool](../../models/agentcreationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
+| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A |
+| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
 
 ### Response
@@ -141,11 +141,11 @@ with Mistral(
 ### Parameters
 
-| Parameter | Type | Required | Description |
-| --- | --- | --- | --- |
-| `agent_id` | *str* | :heavy_check_mark: | N/A |
-| `agent_version` | [OptionalNullable[models.QueryParamAgentVersion]](../../models/queryparamagentversion.md) | :heavy_minus_sign: | N/A |
-| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+| Parameter | Type | Required | Description |
+| --- | --- | --- | --- |
+| `agent_id` | *str* | :heavy_check_mark: | N/A |
+| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
 
 ### Response
@@ -187,19 +187,19 @@ with Mistral(
 ### Parameters
 
-| Parameter | Type | Required | Description |
-| --- | --- | --- | --- |
-| `agent_id` | *str* | :heavy_check_mark: | N/A |
-| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
-| `tools` | List[[models.AgentUpdateRequestTools](../../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
-| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
-| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A |
-| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A |
-| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
-| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
+| Parameter | Type | Required | Description |
+| --- | --- | --- | --- |
+| `agent_id` | *str* | :heavy_check_mark: | N/A |
+| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
+| `tools` | List[[models.AgentUpdateRequestTool](../../models/agentupdaterequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
+| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A |
+| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A |
+| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
+| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. |
 
 ### Response
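`beta.agents.get()` keeps its shape; only the generated name of the `agent_version` union changes. A sketch of pinning a read to a specific version, with a placeholder ID and the assumption that an integer version number is an accepted member of that union:

```python
import os
from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    # "ag_placeholder" is a made-up ID; agent_version is assumed to
    # accept an int version number here.
    agent = client.beta.agents.get(agent_id="ag_placeholder", agent_version=1)
    print(agent.name)
```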
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionRequestMessage](../../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | @@ -112,7 +112,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 634ee419..41b52081 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -87,11 +87,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `inputs` | [models.ChatModerationRequestInputs](../../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *str* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `inputs` | [models.ChatModerationRequestInputs3](../../models/chatmoderationrequestinputs3.md) | :heavy_check_mark: | Chat to classify | +| `model` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index acd43cdb..6aae03c5 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -47,22 +47,22 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../../models/handoffexecution.md) | :heavy_minus_sign: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | [OptionalNullable[models.AgentVersion]](../../models/agentversion.md) | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationRequestHandoffExecution]](../../models/conversationrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationRequestTool](../../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRequestAgentVersion]](../../models/conversationrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -109,7 +109,7 @@ with Mistral( ### Response -**[List[models.ResponseBody]](../../models/.md)** +**[List[models.AgentsAPIV1ConversationsListResponse]](../../models/.md)** ### Errors @@ -150,7 +150,7 @@ with Mistral( ### Response -**[models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet](../../models/agentsapiv1conversationsgetresponsev1conversationsget.md)** +**[models.ResponseV1ConversationsGet](../../models/responsev1conversationsget.md)** ### Errors @@ -425,7 +425,7 @@ with Mistral( | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.ConversationStreamRequestTool](../../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/sdks/jobs/README.md b/docs/sdks/finetuningjobs/README.md similarity index 83% rename from docs/sdks/jobs/README.md rename to docs/sdks/finetuningjobs/README.md index 9c44be75..63897fd6 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/finetuningjobs/README.md @@ -35,19 +35,19 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. 
| -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.QueryParamStatus]](../../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. 
| +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -93,11 +93,11 @@ with Mistral( | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegrations](../../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `integrations` | List[[models.JobInIntegration](../../models/jobinintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | -| `repositories` | List[[models.JobInRepositories](../../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.JobInRepository](../../models/jobinrepository.md)] | :heavy_minus_sign: | N/A | | `classifier_targets` | List[[models.ClassifierTargetIn](../../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 6fa28ca2..129ea223 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -84,7 +84,7 @@ with Mistral( ### Response -**[models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet](../../models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md)** +**[models.ResponseRetrieveModelV1ModelsModelIDGet](../../models/responseretrievemodelv1modelsmodelidget.md)** ### Errors diff --git a/examples/azure/az_chat_no_streaming.py b/examples/azure/az_chat_no_streaming.py new file mode 100644 index 00000000..4d5530dc --- /dev/null +++ b/examples/azure/az_chat_no_streaming.py @@ -0,0 +1,15 @@ +import os + +from mistralai_azure import MistralAzure +from mistralai_azure.models import ChatCompletionRequestMessages, UserMessage + +client = MistralAzure( + azure_api_key=os.environ["AZURE_API_KEY"], + azure_endpoint=os.environ["AZURE_ENDPOINT"], +) + +messages: list[ChatCompletionRequestMessages] = [ + UserMessage(content="What is the capital of France?"), +] +res = client.chat.complete(messages=messages) +print(res.choices[0].message.content) diff --git a/examples/azure/az_chat_no_streaming.py.py b/examples/azure/az_chat_no_streaming.py.py deleted file mode 100644 index 485b594e..00000000 --- a/examples/azure/az_chat_no_streaming.py.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -from mistralai_azure import MistralAzure - -client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], -) - -res = client.chat.complete( - messages=[ - {"role": "user", "content": "What is the capital of France?"}, - ], - # you don't need model as it will always be "azureai" -) -print(res.choices[0].message.content) diff --git a/examples/azure/chat_no_streaming.py b/examples/azure/chat_no_streaming.py new file mode 100644 index 00000000..4d5530dc --- /dev/null +++ b/examples/azure/chat_no_streaming.py @@ -0,0 +1,15 @@ +import os + +from mistralai_azure import MistralAzure +from mistralai_azure.models import ChatCompletionRequestMessages, UserMessage + +client = MistralAzure( + azure_api_key=os.environ["AZURE_API_KEY"], + azure_endpoint=os.environ["AZURE_ENDPOINT"], +) + +messages: list[ChatCompletionRequestMessages] = [ + UserMessage(content="What is the capital of France?"), +] +res = client.chat.complete(messages=messages) +print(res.choices[0].message.content) diff --git a/examples/azure/chat_no_streaming.py.py b/examples/azure/chat_no_streaming.py.py deleted file mode 100644 index 485b594e..00000000 --- a/examples/azure/chat_no_streaming.py.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -from mistralai_azure import MistralAzure - -client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], -) - -res = client.chat.complete( - messages=[ - {"role": "user", "content": "What is the capital of France?"}, - ], - # you don't need model as it will always be "azureai" -) -print(res.choices[0].message.content) diff --git a/examples/mistral/chat/chatbot_with_streaming.py b/examples/mistral/chat/chatbot_with_streaming.py index bbc3881f..eae79dcf 100755 --- a/examples/mistral/chat/chatbot_with_streaming.py +++ b/examples/mistral/chat/chatbot_with_streaming.py @@ -7,6 +7,7 @@ import os import readline import sys +from typing import Any from mistralai.client import Mistral from mistralai.client.models import AssistantMessage, SystemMessage, UserMessage @@ -21,7 +22,7 @@ DEFAULT_TEMPERATURE = 0.7 
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" # A dictionary of all commands and their arguments, used for tab completion. -COMMAND_LIST = { +COMMAND_LIST: dict[str, Any] = { "/new": {}, "/help": {}, "/model": {model: {} for model in MODEL_LIST}, # Nested completions for models diff --git a/examples/mistral/chat/function_calling.py b/examples/mistral/chat/function_calling.py index f0eb9e70..68e9d91c 100644 --- a/examples/mistral/chat/function_calling.py +++ b/examples/mistral/chat/function_calling.py @@ -1,16 +1,20 @@ import functools import json import os -from typing import Dict, List +from typing import Any from mistralai.client import Mistral -from mistralai.client.models.assistantmessage import AssistantMessage -from mistralai.client.models.function import Function -from mistralai.client.models.toolmessage import ToolMessage -from mistralai.client.models.usermessage import UserMessage +from mistralai.client.models import ( + AssistantMessage, + ChatCompletionRequestMessage, + Function, + Tool, + ToolMessage, + UserMessage, +) # Assuming we have the following data -data = { +data: dict[str, list[Any]] = { "transaction_id": ["T1001", "T1002", "T1003", "T1004", "T1005"], "customer_id": ["C001", "C002", "C003", "C002", "C001"], "payment_amount": [125.50, 89.99, 120.00, 54.30, 210.20], @@ -25,20 +29,18 @@ } -def retrieve_payment_status(data: Dict[str, List], transaction_id: str) -> str: +def retrieve_payment_status(data: dict[str, list[Any]], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"status": data["payment_status"][i]}) - else: - return json.dumps({"status": "Error - transaction id not found"}) + return json.dumps({"status": "Error - transaction id not found"}) -def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: +def retrieve_payment_date(data: dict[str, list[Any]], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"date": data["payment_date"][i]}) - else: - return json.dumps({"status": "Error - transaction id not found"}) + return json.dumps({"status": "Error - transaction id not found"}) names_to_functions = { @@ -46,10 +48,9 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: "retrieve_payment_date": functools.partial(retrieve_payment_date, data=data), } -tools = [ - { - "type": "function", - "function": Function( +tools: list[Tool] = [ + Tool( + function=Function( name="retrieve_payment_status", description="Get payment status of a transaction id", parameters={ @@ -63,10 +64,9 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: }, }, ), - }, - { - "type": "function", - "function": Function( + ), + Tool( + function=Function( name="retrieve_payment_date", description="Get payment date of a transaction id", parameters={ @@ -80,7 +80,7 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: }, }, ), - }, + ), ] api_key = os.environ["MISTRAL_API_KEY"] @@ -88,28 +88,27 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: client = Mistral(api_key=api_key) -messages = [UserMessage(content="What's the status of my transaction?")] +messages: list[ChatCompletionRequestMessage] = [ + UserMessage(content="What's the status of my transaction?") +] -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, 
tools=tools, temperature=0) print(response.choices[0].message.content) messages.append(AssistantMessage(content=response.choices[0].message.content)) messages.append(UserMessage(content="My transaction ID is T1001.")) -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) -tool_call = response.choices[0].message.tool_calls[0] +tool_calls = response.choices[0].message.tool_calls +if not tool_calls: + raise RuntimeError("Expected tool calls") +tool_call = tool_calls[0] function_name = tool_call.function.name -function_params = json.loads(tool_call.function.arguments) +function_params = json.loads(str(tool_call.function.arguments)) -print( - f"calling function_name: {function_name}, with function_params: {function_params}" -) +print(f"calling function_name: {function_name}, with function_params: {function_params}") function_result = names_to_functions[function_name](**function_params) @@ -128,8 +127,6 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: ) print(messages) -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) print(f"{response.choices[0].message.content}") diff --git a/examples/mistral/classifier/async_classifier.py b/examples/mistral/classifier/async_classifier.py index d5ee6cc1..45cc14fa 100644 --- a/examples/mistral/classifier/async_classifier.py +++ b/examples/mistral/classifier/async_classifier.py @@ -3,6 +3,7 @@ from pprint import pprint import asyncio from mistralai.client import Mistral, TrainingFile, ClassifierTrainingParametersIn +from mistralai.client.models import ClassifierJobOut import os @@ -26,7 +27,7 @@ async def upload_files(client: Mistral, file_names: list[str]) -> list[str]: return file_ids -async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: +async def train_classifier(client: Mistral, training_file_ids: list[str]) -> str | None: print("Creating job...") job = await client.fine_tuning.jobs.create_async( model="ministral-3b-latest", @@ -40,6 +41,9 @@ async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: ), auto_start=True, ) + if not isinstance(job, ClassifierJobOut): + print("Unexpected job type returned") + return None print(f"Job created ({job.id})") @@ -62,6 +66,9 @@ async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: print("Training failed") raise Exception(f"Job failed {detailed_job.status}") + if not detailed_job.fine_tuned_model: + print("No fine-tuned model returned") + return None print(f"Training succeed: {detailed_job.fine_tuned_model}") return detailed_job.fine_tuned_model diff --git a/pyproject.toml b/pyproject.toml index c9003a1e..7209c64c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0a1" +version = "2.0.0a2" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index 5bf9d675..57bab71a 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -11,8 +11,9 @@ else fi echo "Running mypy..." 
-# TODO: Uncomment once the examples are fixed -# uv run mypy examples/ || ERRORS=1 +echo "-> running on examples" +uv run mypy examples/ \ + --exclude 'audio/' || ERRORS=1 echo "-> running on extra" uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 8c5d6e54..5a7296a7 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0a1" +__version__: str = "2.0.0a2" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a1 2.794.1 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a2 2.794.1 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py index c04abd21..d0da9f07 100644 --- a/src/mistralai/client/agents.py +++ b/src/mistralai/client/agents.py @@ -24,9 +24,9 @@ def complete( self, *, messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[models_agentscompletionrequest.AgentsCompletionRequestMessage], List[ - models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + models_agentscompletionrequest.AgentsCompletionRequestMessageTypedDict ], ], agent_id: str, @@ -110,7 +110,7 @@ def complete( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] + messages, List[models.AgentsCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -194,9 +194,9 @@ async def complete_async( self, *, messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[models_agentscompletionrequest.AgentsCompletionRequestMessage], List[ - models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + models_agentscompletionrequest.AgentsCompletionRequestMessageTypedDict ], ], agent_id: str, @@ -280,7 +280,7 @@ async def complete_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] + messages, List[models.AgentsCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -365,10 +365,10 @@ def stream( *, messages: Union[ List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessage ], List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessageTypedDict ], ], agent_id: str, @@ -454,7 +454,7 @@ def stream( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] + messages, List[models.AgentsCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -547,10 +547,10 @@ async def stream_async( *, messages: Union[ List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessage ], List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + 
models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessageTypedDict ], ], agent_id: str, @@ -636,7 +636,7 @@ async def stream_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] + messages, List[models.AgentsCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] diff --git a/src/mistralai/client/batch.py b/src/mistralai/client/batch.py index d53a45fb..586dc235 100644 --- a/src/mistralai/client/batch.py +++ b/src/mistralai/client/batch.py @@ -2,12 +2,12 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration -from mistralai.client.mistral_jobs import MistralJobs +from mistralai.client.batch_jobs import BatchJobs from typing import Optional class Batch(BaseSDK): - jobs: MistralJobs + jobs: BatchJobs def __init__( self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None @@ -17,4 +17,4 @@ def __init__( self._init_sdks() def _init_sdks(self): - self.jobs = MistralJobs(self.sdk_configuration, parent_ref=self.parent_ref) + self.jobs = BatchJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/mistral_jobs.py b/src/mistralai/client/batch_jobs.py similarity index 99% rename from src/mistralai/client/mistral_jobs.py rename to src/mistralai/client/batch_jobs.py index eae44033..af8d97b2 100644 --- a/src/mistralai/client/mistral_jobs.py +++ b/src/mistralai/client/batch_jobs.py @@ -15,7 +15,7 @@ from typing import Any, Dict, List, Mapping, Optional, Union -class MistralJobs(BaseSDK): +class BatchJobs(BaseSDK): def list( self, *, diff --git a/src/mistralai/client/beta.py b/src/mistralai/client/beta.py index b30003ea..a1bd409e 100644 --- a/src/mistralai/client/beta.py +++ b/src/mistralai/client/beta.py @@ -2,16 +2,16 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration +from mistralai.client.beta_agents import BetaAgents from mistralai.client.conversations import Conversations from mistralai.client.libraries import Libraries -from mistralai.client.mistral_agents import MistralAgents from typing import Optional class Beta(BaseSDK): conversations: Conversations r"""(beta) Conversations API""" - agents: MistralAgents + agents: BetaAgents r"""(beta) Agents API""" libraries: Libraries r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" @@ -27,5 +27,5 @@ def _init_sdks(self): self.conversations = Conversations( self.sdk_configuration, parent_ref=self.parent_ref ) - self.agents = MistralAgents(self.sdk_configuration, parent_ref=self.parent_ref) + self.agents = BetaAgents(self.sdk_configuration, parent_ref=self.parent_ref) self.libraries = Libraries(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/mistral_agents.py b/src/mistralai/client/beta_agents.py similarity index 99% rename from src/mistralai/client/mistral_agents.py rename to src/mistralai/client/beta_agents.py index 2ac7a29e..1420895e 100644 --- a/src/mistralai/client/mistral_agents.py +++ b/src/mistralai/client/beta_agents.py @@ -16,7 +16,7 @@ from typing import Any, Dict, List, Mapping, Optional, Union -class MistralAgents(BaseSDK): +class BetaAgents(BaseSDK): r"""(beta) Agents API""" def create( @@ -27,8 +27,8 @@ def create( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentcreationrequest.AgentCreationRequestTools], - 
List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], + List[models_agentcreationrequest.AgentCreationRequestTool], + List[models_agentcreationrequest.AgentCreationRequestToolTypedDict], ] ] = None, completion_args: Optional[ @@ -75,7 +75,7 @@ def create( request = models.AgentCreationRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] + tools, Optional[List[models.AgentCreationRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -155,8 +155,8 @@ async def create_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentcreationrequest.AgentCreationRequestTools], - List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], + List[models_agentcreationrequest.AgentCreationRequestTool], + List[models_agentcreationrequest.AgentCreationRequestToolTypedDict], ] ] = None, completion_args: Optional[ @@ -203,7 +203,7 @@ async def create_async( request = models.AgentCreationRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] + tools, Optional[List[models.AgentCreationRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -497,8 +497,8 @@ def get( agent_id: str, agent_version: OptionalNullable[ Union[ - models_agents_api_v1_agents_getop.QueryParamAgentVersion, - models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersion, + models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -595,8 +595,8 @@ async def get_async( agent_id: str, agent_version: OptionalNullable[ Union[ - models_agents_api_v1_agents_getop.QueryParamAgentVersion, - models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersion, + models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -694,8 +694,8 @@ def update( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentupdaterequest.AgentUpdateRequestTools], - List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], + List[models_agentupdaterequest.AgentUpdateRequestTool], + List[models_agentupdaterequest.AgentUpdateRequestToolTypedDict], ] ] = None, completion_args: Optional[ @@ -749,7 +749,7 @@ def update( agent_update_request=models.AgentUpdateRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] + tools, Optional[List[models.AgentUpdateRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -834,8 +834,8 @@ async def update_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentupdaterequest.AgentUpdateRequestTools], - List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], + List[models_agentupdaterequest.AgentUpdateRequestTool], + List[models_agentupdaterequest.AgentUpdateRequestToolTypedDict], ] ] = None, completion_args: Optional[ @@ -889,7 +889,7 @@ async def update_async( agent_update_request=models.AgentUpdateRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, 
Optional[List[models.AgentUpdateRequestTools]] + tools, Optional[List[models.AgentUpdateRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 6fa210bb..523e3340 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -110,8 +110,8 @@ def complete( *, model: str, messages: Union[ - List[models_chatcompletionrequest.Messages], - List[models_chatcompletionrequest.MessagesTypedDict], + List[models_chatcompletionrequest.ChatCompletionRequestMessage], + List[models_chatcompletionrequest.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -119,8 +119,8 @@ def complete( stream: Optional[bool] = False, stop: Optional[ Union[ - models_chatcompletionrequest.Stop, - models_chatcompletionrequest.StopTypedDict, + models_chatcompletionrequest.ChatCompletionRequestStop, + models_chatcompletionrequest.ChatCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -201,7 +201,9 @@ def complete( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -285,8 +287,8 @@ async def complete_async( *, model: str, messages: Union[ - List[models_chatcompletionrequest.Messages], - List[models_chatcompletionrequest.MessagesTypedDict], + List[models_chatcompletionrequest.ChatCompletionRequestMessage], + List[models_chatcompletionrequest.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -294,8 +296,8 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[ Union[ - models_chatcompletionrequest.Stop, - models_chatcompletionrequest.StopTypedDict, + models_chatcompletionrequest.ChatCompletionRequestStop, + models_chatcompletionrequest.ChatCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -376,7 +378,9 @@ async def complete_async( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -460,11 +464,9 @@ def stream( *, model: str, messages: Union[ + List[models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessage], List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages - ], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessageTypedDict ], ], temperature: OptionalNullable[float] = UNSET, @@ -558,7 +560,7 @@ def stream( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] + messages, List[models.ChatCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -651,11 +653,9 @@ async def stream_async( *, model: str, messages: Union[ + List[models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessage], List[ - 
models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages - ], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessageTypedDict ], ], temperature: OptionalNullable[float] = UNSET, @@ -749,7 +749,7 @@ async def stream_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] + messages, List[models.ChatCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py index 537e2438..327653d1 100644 --- a/src/mistralai/client/classifiers.py +++ b/src/mistralai/client/classifiers.py @@ -221,8 +221,8 @@ def moderate_chat( self, *, inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs, - models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, + models_chatmoderationrequest.ChatModerationRequestInputs3, + models_chatmoderationrequest.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -250,7 +250,9 @@ def moderate_chat( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + inputs=utils.get_pydantic_model( + inputs, models.ChatModerationRequestInputs3 + ), model=model, ) @@ -318,8 +320,8 @@ async def moderate_chat_async( self, *, inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs, - models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, + models_chatmoderationrequest.ChatModerationRequestInputs3, + models_chatmoderationrequest.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -347,7 +349,9 @@ async def moderate_chat_async( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + inputs=utils.get_pydantic_model( + inputs, models.ChatModerationRequestInputs3 + ), model=model, ) diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index 285beddb..aa037bd2 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -60,7 +60,7 @@ async def run_async( inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], instructions: OptionalNullable[str] = UNSET, tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] + Union[List[models.ConversationRequestTool], List[models.ConversationRequestToolTypedDict]] ] = UNSET, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] @@ -133,7 +133,7 @@ async def run_stream_async( inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], instructions: OptionalNullable[str] = UNSET, tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] + Union[List[models.ConversationRequestTool], List[models.ConversationRequestToolTypedDict]] ] = UNSET, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] @@ -238,13 +238,13 @@ def start( stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - 
models_conversationrequest.HandoffExecution + models_conversationrequest.ConversationRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationrequest.Tools], - List[models_conversationrequest.ToolsTypedDict], + List[models_conversationrequest.ConversationRequestTool], + List[models_conversationrequest.ConversationRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ @@ -259,8 +259,8 @@ def start( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrequest.AgentVersion, - models_conversationrequest.AgentVersionTypedDict, + models_conversationrequest.ConversationRequestAgentVersion, + models_conversationrequest.ConversationRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -307,7 +307,9 @@ def start( store=store, handoff_execution=handoff_execution, instructions=instructions, - tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationRequestTool]] + ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), @@ -389,13 +391,13 @@ async def start_async( stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationrequest.HandoffExecution + models_conversationrequest.ConversationRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationrequest.Tools], - List[models_conversationrequest.ToolsTypedDict], + List[models_conversationrequest.ConversationRequestTool], + List[models_conversationrequest.ConversationRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ @@ -410,8 +412,8 @@ async def start_async( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrequest.AgentVersion, - models_conversationrequest.AgentVersionTypedDict, + models_conversationrequest.ConversationRequestAgentVersion, + models_conversationrequest.ConversationRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -458,7 +460,9 @@ async def start_async( store=store, handoff_execution=handoff_execution, instructions=instructions, - tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationRequestTool]] + ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), @@ -540,7 +544,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: + ) -> List[models.AgentsAPIV1ConversationsListResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. 
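For readers tracking the rename at the call site, here is a minimal sketch of starting a conversation with the new request model names. The agent ID is a placeholder, and passing `inputs` as a plain string and `agent_version` as a bare int are assumptions about which variants of the respective unions are accepted:

```python
import os

from mistralai.client import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# tools now deserializes into ConversationRequestTool (previously Tools), and
# agent_version into ConversationRequestAgentVersion (previously AgentVersion).
res = client.beta.conversations.start(
    inputs="Who is the best French painter? Answer in one short sentence.",
    agent_id="ag_0123456789",  # placeholder agent ID
    agent_version=1,           # assumed int variant of the version union
)
print(res)
```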
@@ -611,7 +615,9 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.ResponseBody], http_res) + return unmarshal_json_response( + List[models.AgentsAPIV1ConversationsListResponse], http_res + ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -636,7 +642,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: + ) -> List[models.AgentsAPIV1ConversationsListResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. @@ -707,7 +713,9 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.ResponseBody], http_res) + return unmarshal_json_response( + List[models.AgentsAPIV1ConversationsListResponse], http_res + ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -730,7 +738,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + ) -> models.ResponseV1ConversationsGet: r"""Retrieve a conversation information. Given a conversation_id retrieve a conversation entity with its attributes. @@ -797,9 +805,7 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res - ) + return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -822,7 +828,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + ) -> models.ResponseV1ConversationsGet: r"""Retrieve a conversation information. Given a conversation_id retrieve a conversation entity with its attributes. 
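The response-model renames surface directly in return annotations. A minimal sketch, assuming default pagination and a placeholder conversation ID:

```python
import os

from mistralai.client import Mistral
from mistralai.client.models import ResponseV1ConversationsGet

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# list() now returns List[models.AgentsAPIV1ConversationsListResponse]
# rather than List[models.ResponseBody].
conversations = client.beta.conversations.list()

# get() now returns models.ResponseV1ConversationsGet, the shortened name for
# AgentsAPIV1ConversationsGetResponseV1ConversationsGet.
conv: ResponseV1ConversationsGet = client.beta.conversations.get(
    conversation_id="conv_0123456789",  # placeholder conversation ID
)
```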
@@ -889,9 +895,7 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res - ) + return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -1993,9 +1997,9 @@ def start_stream( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[models_conversationstreamrequest.ConversationStreamRequestTool], List[ - models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + models_conversationstreamrequest.ConversationStreamRequestToolTypedDict ], ] ] = None, @@ -2060,7 +2064,7 @@ def start_stream( handoff_execution=handoff_execution, instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.ConversationStreamRequestTools]] + tools, Optional[List[models.ConversationStreamRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] @@ -2155,9 +2159,9 @@ async def start_stream_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[models_conversationstreamrequest.ConversationStreamRequestTool], List[ - models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + models_conversationstreamrequest.ConversationStreamRequestToolTypedDict ], ] ] = None, @@ -2222,7 +2226,7 @@ async def start_stream_async( handoff_execution=handoff_execution, instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.ConversationStreamRequestTools]] + tools, Optional[List[models.ConversationStreamRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py index 009a604f..3316e63b 100644 --- a/src/mistralai/client/documents.py +++ b/src/mistralai/client/documents.py @@ -273,7 +273,7 @@ def upload( request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), ), ) @@ -292,11 +292,7 @@ def upload( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, + request.request_body, False, False, "multipart", models.DocumentUpload ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -383,7 +379,7 @@ async def upload_async( request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), ), ) @@ -402,11 +398,7 @@ async def upload_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, + request.request_body, False, False, "multipart", models.DocumentUpload ), 
diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py
index 009a604f..3316e63b 100644
--- a/src/mistralai/client/documents.py
+++ b/src/mistralai/client/documents.py
@@ -273,7 +273,7 @@ def upload(

         request = models.LibrariesDocumentsUploadV1Request(
             library_id=library_id,
-            request_body=models.LibrariesDocumentsUploadV1DocumentUpload(
+            request_body=models.DocumentUpload(
                 file=utils.get_pydantic_model(file, models.File),
             ),
         )
@@ -292,11 +292,7 @@ def upload(
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request.request_body,
-                False,
-                False,
-                "multipart",
-                models.LibrariesDocumentsUploadV1DocumentUpload,
+                request.request_body, False, False, "multipart", models.DocumentUpload
             ),
             allow_empty_value=None,
             timeout_ms=timeout_ms,
@@ -383,7 +379,7 @@ async def upload_async(

         request = models.LibrariesDocumentsUploadV1Request(
             library_id=library_id,
-            request_body=models.LibrariesDocumentsUploadV1DocumentUpload(
+            request_body=models.DocumentUpload(
                 file=utils.get_pydantic_model(file, models.File),
             ),
         )
@@ -402,11 +398,7 @@ async def upload_async(
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request.request_body,
-                False,
-                False,
-                "multipart",
-                models.LibrariesDocumentsUploadV1DocumentUpload,
+                request.request_body, False, False, "multipart", models.DocumentUpload
             ),
             allow_empty_value=None,
             timeout_ms=timeout_ms,
diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py
index 97817eab..b384cda4 100644
--- a/src/mistralai/client/files.py
+++ b/src/mistralai/client/files.py
@@ -62,7 +62,7 @@ def upload(
         else:
             base_url = self._get_url(base_url, url_variables)

-        request = models.FilesAPIRoutesUploadFileMultiPartBodyParams(
+        request = models.MultiPartBodyParams(
             purpose=purpose,
             file=utils.get_pydantic_model(file, models.File),
         )
@@ -81,11 +81,7 @@ def upload(
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request,
-                False,
-                False,
-                "multipart",
-                models.FilesAPIRoutesUploadFileMultiPartBodyParams,
+                request, False, False, "multipart", models.MultiPartBodyParams
             ),
             allow_empty_value=None,
             timeout_ms=timeout_ms,
@@ -168,7 +164,7 @@ async def upload_async(
         else:
             base_url = self._get_url(base_url, url_variables)

-        request = models.FilesAPIRoutesUploadFileMultiPartBodyParams(
+        request = models.MultiPartBodyParams(
             purpose=purpose,
             file=utils.get_pydantic_model(file, models.File),
         )
@@ -187,11 +183,7 @@ async def upload_async(
             http_headers=http_headers,
             security=self.sdk_configuration.security,
             get_serialized_body=lambda: utils.serialize_request_body(
-                request,
-                False,
-                False,
-                "multipart",
-                models.FilesAPIRoutesUploadFileMultiPartBodyParams,
+                request, False, False, "multipart", models.MultiPartBodyParams
             ),
             allow_empty_value=None,
             timeout_ms=timeout_ms,
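Both upload bodies above keep their wire format; only the generated multipart model names shrink (models.LibrariesDocumentsUploadV1DocumentUpload becomes models.DocumentUpload, and models.FilesAPIRoutesUploadFileMultiPartBodyParams becomes models.MultiPartBodyParams). Callers that pass plain dicts are unaffected. A minimal sketch, assuming a configured client; the file name is illustrative:

    from mistralai import Mistral

    mistral = Mistral(api_key="...")  # placeholder key

    # The upload() signature itself is untouched by this patch; only code that
    # imported the old multipart model names directly needs the new ones.
    with open("training_data.jsonl", "rb") as fh:
        uploaded = mistral.files.upload(
            file={"file_name": "training_data.jsonl", "content": fh},
            purpose="fine-tune",
        )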
diff --git a/src/mistralai/client/fine_tuning.py b/src/mistralai/client/fine_tuning.py
index c57425fd..aeb832d4 100644
--- a/src/mistralai/client/fine_tuning.py
+++ b/src/mistralai/client/fine_tuning.py
@@ -2,12 +2,12 @@
 from .basesdk import BaseSDK
 from .sdkconfiguration import SDKConfiguration
-from mistralai.client.jobs import Jobs
+from mistralai.client.fine_tuning_jobs import FineTuningJobs
 from typing import Optional


 class FineTuning(BaseSDK):
-    jobs: Jobs
+    jobs: FineTuningJobs

     def __init__(
         self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
@@ -17,4 +17,4 @@ def __init__(
         self._init_sdks()

     def _init_sdks(self):
-        self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref)
+        self.jobs = FineTuningJobs(self.sdk_configuration, parent_ref=self.parent_ref)
diff --git a/src/mistralai/client/jobs.py b/src/mistralai/client/fine_tuning_jobs.py
similarity index 98%
rename from src/mistralai/client/jobs.py
rename to src/mistralai/client/fine_tuning_jobs.py
index 848926ea..fb75e8c7 100644
--- a/src/mistralai/client/jobs.py
+++ b/src/mistralai/client/fine_tuning_jobs.py
@@ -17,7 +17,7 @@
 from typing import List, Mapping, Optional, Union


-class Jobs(BaseSDK):
+class FineTuningJobs(BaseSDK):
     def list(
         self,
         *,
@@ -28,7 +28,7 @@ def list(
         created_before: OptionalNullable[datetime] = UNSET,
         created_by_me: Optional[bool] = False,
         status: OptionalNullable[
-            models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus
+            models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.JobsAPIRoutesFineTuningGetFineTuningJobsStatus
         ] = UNSET,
         wandb_project: OptionalNullable[str] = UNSET,
         wandb_name: OptionalNullable[str] = UNSET,
@@ -141,7 +141,7 @@ async def list_async(
         created_before: OptionalNullable[datetime] = UNSET,
         created_by_me: Optional[bool] = False,
         status: OptionalNullable[
-            models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus
+            models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.JobsAPIRoutesFineTuningGetFineTuningJobsStatus
         ] = UNSET,
         wandb_project: OptionalNullable[str] = UNSET,
         wandb_name: OptionalNullable[str] = UNSET,
@@ -261,8 +261,8 @@ def create(
         suffix: OptionalNullable[str] = UNSET,
         integrations: OptionalNullable[
             Union[
-                List[models_jobin.JobInIntegrations],
-                List[models_jobin.JobInIntegrationsTypedDict],
+                List[models_jobin.JobInIntegration],
+                List[models_jobin.JobInIntegrationTypedDict],
             ]
         ] = UNSET,
         auto_start: Optional[bool] = None,
@@ -272,8 +272,8 @@ def create(
         ] = UNSET,
         repositories: OptionalNullable[
             Union[
-                List[models_jobin.JobInRepositories],
-                List[models_jobin.JobInRepositoriesTypedDict],
+                List[models_jobin.JobInRepository],
+                List[models_jobin.JobInRepositoryTypedDict],
             ]
         ] = UNSET,
         classifier_targets: OptionalNullable[
@@ -325,7 +325,7 @@ def create(
             validation_files=validation_files,
             suffix=suffix,
             integrations=utils.get_pydantic_model(
-                integrations, OptionalNullable[List[models.JobInIntegrations]]
+                integrations, OptionalNullable[List[models.JobInIntegration]]
             ),
             auto_start=auto_start,
             invalid_sample_skip_percentage=invalid_sample_skip_percentage,
@@ -334,7 +334,7 @@ def create(
                 hyperparameters, models.Hyperparameters
             ),
             repositories=utils.get_pydantic_model(
-                repositories, OptionalNullable[List[models.JobInRepositories]]
+                repositories, OptionalNullable[List[models.JobInRepository]]
             ),
             classifier_targets=utils.get_pydantic_model(
                 classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]]
@@ -414,8 +414,8 @@ async def create_async(
         suffix: OptionalNullable[str] = UNSET,
         integrations: OptionalNullable[
             Union[
-                List[models_jobin.JobInIntegrations],
-                List[models_jobin.JobInIntegrationsTypedDict],
+                List[models_jobin.JobInIntegration],
+                List[models_jobin.JobInIntegrationTypedDict],
             ]
         ] = UNSET,
         auto_start: Optional[bool] = None,
@@ -425,8 +425,8 @@ async def create_async(
         ] = UNSET,
         repositories: OptionalNullable[
             Union[
-                List[models_jobin.JobInRepositories],
-                List[models_jobin.JobInRepositoriesTypedDict],
+                List[models_jobin.JobInRepository],
+                List[models_jobin.JobInRepositoryTypedDict],
             ]
         ] = UNSET,
         classifier_targets: OptionalNullable[
@@ -478,7 +478,7 @@ async def create_async(
             validation_files=validation_files,
             suffix=suffix,
             integrations=utils.get_pydantic_model(
-                integrations, OptionalNullable[List[models.JobInIntegrations]]
+                integrations, OptionalNullable[List[models.JobInIntegration]]
             ),
             auto_start=auto_start,
             invalid_sample_skip_percentage=invalid_sample_skip_percentage,
@@ -487,7 +487,7 @@ async def create_async(
                 hyperparameters, models.Hyperparameters
             ),
             repositories=utils.get_pydantic_model(
-                repositories, OptionalNullable[List[models.JobInRepositories]]
+                repositories, OptionalNullable[List[models.JobInRepository]]
            ),
            classifier_targets=utils.get_pydantic_model(
                classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]]
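The rename from Jobs to FineTuningJobs (and of the fine-tuning models above) is source-breaking for direct imports, but the accessor path on the client object is unchanged. A minimal sketch, assuming a configured client; the job arguments are illustrative, not prescribed by this patch:

    from mistralai import Mistral
    from mistralai.client.fine_tuning_jobs import FineTuningJobs  # was mistralai.client.jobs.Jobs

    mistral = Mistral(api_key="...")  # placeholder key

    # The status filter enum is now JobsAPIRoutesFineTuningGetFineTuningJobsStatus
    # (formerly QueryParamStatus); plain string literals keep working.
    running = mistral.fine_tuning.jobs.list(status="RUNNING", created_by_me=True)

    # JobInIntegrations/JobInRepositories become the singular JobInIntegration/
    # JobInRepository; dict-shaped arguments remain accepted as before.
    job = mistral.fine_tuning.jobs.create(
        model="open-mistral-7b",
        training_files=[{"file_id": "file-abc", "weight": 1}],  # illustrative id
        hyperparameters={"training_steps": 10},
    )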
diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py
index 23e65222..046037c5 100644
--- a/src/mistralai/client/models/__init__.py
+++ b/src/mistralai/client/models/__init__.py
@@ -7,13 +7,7 @@
 import sys

 if TYPE_CHECKING:
-    from .agent import (
-        Agent,
-        AgentObject,
-        AgentTools,
-        AgentToolsTypedDict,
-        AgentTypedDict,
-    )
+    from .agent import Agent, AgentObject, AgentTool, AgentToolTypedDict, AgentTypedDict
     from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict
     from .agentconversation import (
         AgentConversation,
@@ -24,13 +18,12 @@
     )
     from .agentcreationrequest import (
         AgentCreationRequest,
-        AgentCreationRequestTools,
-        AgentCreationRequestToolsTypedDict,
+        AgentCreationRequestTool,
+        AgentCreationRequestToolTypedDict,
         AgentCreationRequestTypedDict,
     )
     from .agenthandoffdoneevent import (
         AgentHandoffDoneEvent,
-        AgentHandoffDoneEventType,
         AgentHandoffDoneEventTypedDict,
     )
     from .agenthandoffentry import (
@@ -41,7 +34,6 @@
     )
     from .agenthandoffstartedevent import (
         AgentHandoffStartedEvent,
-        AgentHandoffStartedEventType,
         AgentHandoffStartedEventTypedDict,
     )
     from .agents_api_v1_agents_create_or_update_aliasop import (
@@ -57,10 +49,10 @@
         AgentsAPIV1AgentsGetVersionRequestTypedDict,
     )
     from .agents_api_v1_agents_getop import (
+        AgentsAPIV1AgentsGetAgentVersion,
+        AgentsAPIV1AgentsGetAgentVersionTypedDict,
         AgentsAPIV1AgentsGetRequest,
         AgentsAPIV1AgentsGetRequestTypedDict,
-        QueryParamAgentVersion,
-        QueryParamAgentVersionTypedDict,
     )
     from .agents_api_v1_agents_list_version_aliasesop import (
         AgentsAPIV1AgentsListVersionAliasesRequest,
         AgentsAPIV1AgentsListVersionAliasesRequestTypedDict,
@@ -97,8 +89,8 @@
     from .agents_api_v1_conversations_getop import (
         AgentsAPIV1ConversationsGetRequest,
         AgentsAPIV1ConversationsGetRequestTypedDict,
-        AgentsAPIV1ConversationsGetResponseV1ConversationsGet,
-        AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict,
+        ResponseV1ConversationsGet,
+        ResponseV1ConversationsGetTypedDict,
     )
     from .agents_api_v1_conversations_historyop import (
         AgentsAPIV1ConversationsHistoryRequest,
         AgentsAPIV1ConversationsHistoryRequestTypedDict,
@@ -107,8 +99,8 @@
     from .agents_api_v1_conversations_listop import (
         AgentsAPIV1ConversationsListRequest,
         AgentsAPIV1ConversationsListRequestTypedDict,
-        ResponseBody,
-        ResponseBodyTypedDict,
+        AgentsAPIV1ConversationsListResponse,
+        AgentsAPIV1ConversationsListResponseTypedDict,
     )
     from .agents_api_v1_conversations_messagesop import (
         AgentsAPIV1ConversationsMessagesRequest,
         AgentsAPIV1ConversationsMessagesRequestTypedDict,
@@ -124,8 +116,8 @@
     )
     from .agentscompletionrequest import (
         AgentsCompletionRequest,
-        AgentsCompletionRequestMessages,
-        AgentsCompletionRequestMessagesTypedDict,
+        AgentsCompletionRequestMessage,
+        AgentsCompletionRequestMessageTypedDict,
         AgentsCompletionRequestStop,
         AgentsCompletionRequestStopTypedDict,
         AgentsCompletionRequestToolChoice,
@@ -134,8 +126,8 @@
     )
     from .agentscompletionstreamrequest import (
         AgentsCompletionStreamRequest,
-        AgentsCompletionStreamRequestMessages,
-        AgentsCompletionStreamRequestMessagesTypedDict,
+        AgentsCompletionStreamRequestMessage,
+        AgentsCompletionStreamRequestMessageTypedDict,
         AgentsCompletionStreamRequestStop,
         AgentsCompletionStreamRequestStopTypedDict,
         AgentsCompletionStreamRequestToolChoice,
@@ -144,8 +136,8 @@
     )
     from .agentupdaterequest import (
         AgentUpdateRequest,
-        AgentUpdateRequestTools,
-        AgentUpdateRequestToolsTypedDict,
+        AgentUpdateRequestTool,
+        AgentUpdateRequestToolTypedDict,
         AgentUpdateRequestTypedDict,
     )
     from .apiendpoint import APIEndpoint
@@ -161,7 +153,7 @@
         AssistantMessageRole,
         AssistantMessageTypedDict,
     )
-    from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict
+    from .audiochunk import AudioChunk, AudioChunkTypedDict
     from .audioencoding import AudioEncoding
     from .audioformat import AudioFormat, AudioFormatTypedDict
     from .audiotranscriptionrequest import (
@@ -172,7 +164,7 @@
         AudioTranscriptionRequestStream,
         AudioTranscriptionRequestStreamTypedDict,
     )
-    from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict
+    from .basemodelcard import BaseModelCard, BaseModelCardTypedDict
     from .batcherror import BatchError, BatchErrorTypedDict
     from .batchjobin import BatchJobIn, BatchJobInTypedDict
     from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict
@@ -186,18 +178,18 @@
     )
     from .chatcompletionchoice import (
         ChatCompletionChoice,
+        ChatCompletionChoiceFinishReason,
         ChatCompletionChoiceTypedDict,
-        FinishReason,
     )
     from .chatcompletionrequest import (
         ChatCompletionRequest,
+        ChatCompletionRequestMessage,
+        ChatCompletionRequestMessageTypedDict,
+        ChatCompletionRequestStop,
+        ChatCompletionRequestStopTypedDict,
         ChatCompletionRequestToolChoice,
         ChatCompletionRequestToolChoiceTypedDict,
         ChatCompletionRequestTypedDict,
-        Messages,
-        MessagesTypedDict,
-        Stop,
-        StopTypedDict,
     )
     from .chatcompletionresponse import (
         ChatCompletionResponse,
@@ -205,8 +197,8 @@
     )
     from .chatcompletionstreamrequest import (
         ChatCompletionStreamRequest,
-        ChatCompletionStreamRequestMessages,
-        ChatCompletionStreamRequestMessagesTypedDict,
+        ChatCompletionStreamRequestMessage,
+        ChatCompletionStreamRequestMessageTypedDict,
         ChatCompletionStreamRequestStop,
         ChatCompletionStreamRequestStopTypedDict,
         ChatCompletionStreamRequestToolChoice,
@@ -215,13 +207,13 @@
     )
     from .chatmoderationrequest import (
         ChatModerationRequest,
-        ChatModerationRequestInputs,
-        ChatModerationRequestInputsTypedDict,
+        ChatModerationRequestInputs1,
+        ChatModerationRequestInputs1TypedDict,
+        ChatModerationRequestInputs2,
+        ChatModerationRequestInputs2TypedDict,
+        ChatModerationRequestInputs3,
+        ChatModerationRequestInputs3TypedDict,
         ChatModerationRequestTypedDict,
-        One,
-        OneTypedDict,
-        Two,
-        TwoTypedDict,
     )
     from .checkpointout import CheckpointOut, CheckpointOutTypedDict
     from .classificationrequest import (
@@ -240,24 +232,21 @@
     )
     from .classifierdetailedjobout import (
         ClassifierDetailedJobOut,
-        ClassifierDetailedJobOutIntegrations,
-        ClassifierDetailedJobOutIntegrationsTypedDict,
-        ClassifierDetailedJobOutJobType,
+        ClassifierDetailedJobOutIntegration,
+        ClassifierDetailedJobOutIntegrationTypedDict,
         ClassifierDetailedJobOutObject,
         ClassifierDetailedJobOutStatus,
         ClassifierDetailedJobOutTypedDict,
     )
     from .classifierftmodelout import (
         ClassifierFTModelOut,
-        ClassifierFTModelOutModelType,
         ClassifierFTModelOutObject,
         ClassifierFTModelOutTypedDict,
     )
     from .classifierjobout import (
         ClassifierJobOut,
-        ClassifierJobOutIntegrations,
-        ClassifierJobOutIntegrationsTypedDict,
-        ClassifierJobOutJobType,
+        ClassifierJobOutIntegration,
+        ClassifierJobOutIntegrationTypedDict,
         ClassifierJobOutObject,
         ClassifierJobOutStatus,
         ClassifierJobOutTypedDict,
@@ -272,22 +261,17 @@
         ClassifierTrainingParametersIn,
         ClassifierTrainingParametersInTypedDict,
     )
-    from .codeinterpretertool import (
-        CodeInterpreterTool,
-        CodeInterpreterToolType,
-        CodeInterpreterToolTypedDict,
-    )
+    from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict
     from .completionargs import CompletionArgs, CompletionArgsTypedDict
     from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict
     from .completionchunk import CompletionChunk, CompletionChunkTypedDict
     from .completiondetailedjobout import (
         CompletionDetailedJobOut,
-        CompletionDetailedJobOutIntegrations,
-        CompletionDetailedJobOutIntegrationsTypedDict,
-        CompletionDetailedJobOutJobType,
+        CompletionDetailedJobOutIntegration,
+        CompletionDetailedJobOutIntegrationTypedDict,
         CompletionDetailedJobOutObject,
-        CompletionDetailedJobOutRepositories,
-        CompletionDetailedJobOutRepositoriesTypedDict,
+        CompletionDetailedJobOutRepository,
+        CompletionDetailedJobOutRepositoryTypedDict,
         CompletionDetailedJobOutStatus,
         CompletionDetailedJobOutTypedDict,
     )
@@ -296,18 +280,16 @@
         CompletionFTModelOut,
         CompletionFTModelOutObject,
         CompletionFTModelOutTypedDict,
-        ModelType,
     )
     from .completionjobout import (
         CompletionJobOut,
+        CompletionJobOutIntegration,
+        CompletionJobOutIntegrationTypedDict,
         CompletionJobOutObject,
+        CompletionJobOutRepository,
+        CompletionJobOutRepositoryTypedDict,
+        CompletionJobOutStatus,
         CompletionJobOutTypedDict,
-        Integrations,
-        IntegrationsTypedDict,
-        JobType,
-        Repositories,
-        RepositoriesTypedDict,
-        Status,
     )
     from .completionresponsestreamchoice import (
         CompletionResponseStreamChoice,
@@ -343,8 +325,8 @@
         ConversationHistory,
         ConversationHistoryObject,
         ConversationHistoryTypedDict,
-        Entries,
-        EntriesTypedDict,
+        Entry,
+        EntryTypedDict,
     )
     from .conversationinputs import ConversationInputs, ConversationInputsTypedDict
     from .conversationmessages import (
@@ -353,20 +335,20 @@
         ConversationMessagesTypedDict,
     )
     from .conversationrequest import (
-        AgentVersion,
-        AgentVersionTypedDict,
         ConversationRequest,
+        ConversationRequestAgentVersion,
+        ConversationRequestAgentVersionTypedDict,
+        ConversationRequestHandoffExecution,
+        ConversationRequestTool,
+        ConversationRequestToolTypedDict,
         ConversationRequestTypedDict,
-        HandoffExecution,
-        Tools,
-        ToolsTypedDict,
     )
     from .conversationresponse import (
         ConversationResponse,
         ConversationResponseObject,
         ConversationResponseTypedDict,
-        Outputs,
-        OutputsTypedDict,
+        Output,
+        OutputTypedDict,
     )
     from .conversationrestartrequest import (
         ConversationRestartRequest,
@@ -387,8 +369,8 @@
         ConversationStreamRequestAgentVersion,
         ConversationStreamRequestAgentVersionTypedDict,
         ConversationStreamRequestHandoffExecution,
-        ConversationStreamRequestTools,
-        ConversationStreamRequestToolsTypedDict,
+        ConversationStreamRequestTool,
+        ConversationStreamRequestToolTypedDict,
         ConversationStreamRequestTypedDict,
     )
     from .conversationusageinfo import (
@@ -402,16 +384,12 @@
     from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict
     from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict
     from .deltamessage import (
-        Content,
-        ContentTypedDict,
         DeltaMessage,
+        DeltaMessageContent,
+        DeltaMessageContentTypedDict,
         DeltaMessageTypedDict,
     )
-    from .documentlibrarytool import (
-        DocumentLibraryTool,
-        DocumentLibraryToolType,
-        DocumentLibraryToolTypedDict,
-    )
+    from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict
     from .documentout import DocumentOut, DocumentOutTypedDict
     from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict
     from .documentupdatein import (
@@ -464,8 +442,8 @@
         FilesAPIRoutesRetrieveFileRequestTypedDict,
     )
     from .files_api_routes_upload_fileop import (
-        FilesAPIRoutesUploadFileMultiPartBodyParams,
-        FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict,
+        MultiPartBodyParams,
+        MultiPartBodyParamsTypedDict,
     )
     from .fileschema import FileSchema, FileSchemaTypedDict
     from .filesignedurl import FileSignedURL, FileSignedURLTypedDict
@@ -491,7 +469,7 @@
         FTModelCapabilitiesOut,
         FTModelCapabilitiesOutTypedDict,
     )
-    from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict
+    from .ftmodelcard import FTModelCard, FTModelCardTypedDict
     from .function import Function, FunctionTypedDict
     from .functioncall import (
         Arguments,
@@ -509,11 +487,7 @@
         FunctionCallEntryArguments,
         FunctionCallEntryArgumentsTypedDict,
     )
-    from .functioncallevent import (
-        FunctionCallEvent,
-        FunctionCallEventType,
-        FunctionCallEventTypedDict,
-    )
+    from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict
     from .functionname import FunctionName, FunctionNameTypedDict
     from .functionresultentry import (
         FunctionResultEntry,
@@ -521,54 +495,42 @@
         FunctionResultEntryType,
         FunctionResultEntryTypedDict,
     )
-    from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict
-    from .githubrepositoryin import (
-        GithubRepositoryIn,
-        GithubRepositoryInType,
-        GithubRepositoryInTypedDict,
-    )
-    from .githubrepositoryout import (
-        GithubRepositoryOut,
-        GithubRepositoryOutType,
-        GithubRepositoryOutTypedDict,
-    )
+    from .functiontool import FunctionTool, FunctionToolTypedDict
+    from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict
+    from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict
     from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData
-    from .imagegenerationtool import (
-        ImageGenerationTool,
-        ImageGenerationToolType,
-        ImageGenerationToolTypedDict,
-    )
+    from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict
     from .imageurl import ImageURL, ImageURLTypedDict
     from .imageurlchunk import (
         ImageURLChunk,
-        ImageURLChunkImageURL,
-        ImageURLChunkImageURLTypedDict,
         ImageURLChunkType,
         ImageURLChunkTypedDict,
+        ImageURLUnion,
+        ImageURLUnionTypedDict,
     )
     from .inputentries import InputEntries, InputEntriesTypedDict
     from .inputs import (
         Inputs,
+        InputsMessage,
+        InputsMessageTypedDict,
         InputsTypedDict,
         InstructRequestInputs,
-        InstructRequestInputsMessages,
-        InstructRequestInputsMessagesTypedDict,
         InstructRequestInputsTypedDict,
     )
     from .instructrequest import (
         InstructRequest,
-        InstructRequestMessages,
-        InstructRequestMessagesTypedDict,
+        InstructRequestMessage,
+        InstructRequestMessageTypedDict,
         InstructRequestTypedDict,
     )
     from .jobin import (
         Hyperparameters,
         HyperparametersTypedDict,
         JobIn,
-        JobInIntegrations,
-        JobInIntegrationsTypedDict,
-        JobInRepositories,
-        JobInRepositoriesTypedDict,
+        JobInIntegration,
+        JobInIntegrationTypedDict,
+        JobInRepository,
+        JobInRepositoryTypedDict,
         JobInTypedDict,
     )
     from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict
@@ -597,8 +559,8 @@
     from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import (
         JobsAPIRoutesFineTuningCreateFineTuningJobResponse,
         JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict,
-        Response1,
-        Response1TypedDict,
+        Response,
+        ResponseTypedDict,
     )
     from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import (
         JobsAPIRoutesFineTuningGetFineTuningJobRequest,
@@ -609,7 +571,7 @@
     from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import (
         JobsAPIRoutesFineTuningGetFineTuningJobsRequest,
         JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict,
-        QueryParamStatus,
+        JobsAPIRoutesFineTuningGetFineTuningJobsStatus,
     )
     from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import (
         JobsAPIRoutesFineTuningStartFineTuningJobRequest,
@@ -681,8 +643,8 @@
         LibrariesDocumentsUpdateV1RequestTypedDict,
     )
     from .libraries_documents_upload_v1op import (
-        LibrariesDocumentsUploadV1DocumentUpload,
-        LibrariesDocumentsUploadV1DocumentUploadTypedDict,
+        DocumentUpload,
+        DocumentUploadTypedDict,
         LibrariesDocumentsUploadV1Request,
         LibrariesDocumentsUploadV1RequestTypedDict,
     )
@@ -722,10 +684,10 @@
         MessageInputEntry,
         MessageInputEntryContent,
         MessageInputEntryContentTypedDict,
+        MessageInputEntryObject,
         MessageInputEntryRole,
         MessageInputEntryType,
         MessageInputEntryTypedDict,
-        Object,
     )
     from .messageoutputcontentchunks import (
         MessageOutputContentChunks,
@@ -745,7 +707,6 @@
         MessageOutputEventContent,
         MessageOutputEventContentTypedDict,
         MessageOutputEventRole,
-        MessageOutputEventType,
         MessageOutputEventTypedDict,
     )
     from .metricout import MetricOut, MetricOutTypedDict
@@ -754,11 +715,16 @@
     from .modelconversation import (
         ModelConversation,
         ModelConversationObject,
-        ModelConversationTools,
-        ModelConversationToolsTypedDict,
+        ModelConversationTool,
+        ModelConversationToolTypedDict,
         ModelConversationTypedDict,
     )
-    from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict
+    from .modellist import (
+        ModelList,
+        ModelListData,
+        ModelListDataTypedDict,
+        ModelListTypedDict,
+    )
     from .moderationobject import ModerationObject, ModerationObjectTypedDict
     from .moderationresponse import ModerationResponse, ModerationResponseTypedDict
     from .no_response_error import NoResponseError
@@ -784,9 +750,9 @@
         RealtimeTranscriptionErrorTypedDict,
     )
     from .realtimetranscriptionerrordetail import (
-        Message,
-        MessageTypedDict,
         RealtimeTranscriptionErrorDetail,
+        RealtimeTranscriptionErrorDetailMessage,
+        RealtimeTranscriptionErrorDetailMessageTypedDict,
         RealtimeTranscriptionErrorDetailTypedDict,
     )
     from .realtimetranscriptionsession import (
@@ -807,29 +773,20 @@
         ReferenceChunkTypedDict,
     )
     from .requestsource import RequestSource
-    from .responsedoneevent import (
-        ResponseDoneEvent,
-        ResponseDoneEventType,
-        ResponseDoneEventTypedDict,
-    )
-    from .responseerrorevent import (
-        ResponseErrorEvent,
-        ResponseErrorEventType,
-        ResponseErrorEventTypedDict,
-    )
+    from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict
+    from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict
     from .responseformat import ResponseFormat, ResponseFormatTypedDict
     from .responseformats import ResponseFormats
     from .responsestartedevent import (
         ResponseStartedEvent,
-        ResponseStartedEventType,
         ResponseStartedEventTypedDict,
     )
     from .responsevalidationerror import ResponseValidationError
     from .retrieve_model_v1_models_model_id_getop import (
+        ResponseRetrieveModelV1ModelsModelIDGet,
+        ResponseRetrieveModelV1ModelsModelIDGetTypedDict,
         RetrieveModelV1ModelsModelIDGetRequest,
         RetrieveModelV1ModelsModelIDGetRequestTypedDict,
-        RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet,
-        RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict,
     )
     from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict
     from .sampletype import SampleType
@@ -842,7 +799,6 @@
     from .source import Source
     from .ssetypes import SSETypes
     from .systemmessage import (
-        Role,
         SystemMessage,
         SystemMessageContent,
         SystemMessageContentTypedDict,
@@ -869,20 +825,18 @@
         ToolExecutionDeltaEvent,
         ToolExecutionDeltaEventName,
         ToolExecutionDeltaEventNameTypedDict,
-        ToolExecutionDeltaEventType,
         ToolExecutionDeltaEventTypedDict,
     )
     from .toolexecutiondoneevent import (
         ToolExecutionDoneEvent,
         ToolExecutionDoneEventName,
         ToolExecutionDoneEventNameTypedDict,
-        ToolExecutionDoneEventType,
         ToolExecutionDoneEventTypedDict,
     )
     from .toolexecutionentry import (
-        Name,
-        NameTypedDict,
         ToolExecutionEntry,
+        ToolExecutionEntryName,
+        ToolExecutionEntryNameTypedDict,
         ToolExecutionEntryObject,
         ToolExecutionEntryType,
         ToolExecutionEntryTypedDict,
     )
@@ -891,7 +845,6 @@
         ToolExecutionStartedEvent,
         ToolExecutionStartedEventName,
         ToolExecutionStartedEventNameTypedDict,
-        ToolExecutionStartedEventType,
         ToolExecutionStartedEventTypedDict,
     )
     from .toolfilechunk import (
@@ -905,7 +858,6 @@
         ToolMessage,
         ToolMessageContent,
         ToolMessageContentTypedDict,
-        ToolMessageRole,
         ToolMessageTypedDict,
     )
     from .toolreferencechunk import (
@@ -923,12 +875,11 @@
     )
     from .transcriptionsegmentchunk import (
         TranscriptionSegmentChunk,
+        TranscriptionSegmentChunkType,
         TranscriptionSegmentChunkTypedDict,
-        Type,
     )
     from .transcriptionstreamdone import (
         TranscriptionStreamDone,
-        TranscriptionStreamDoneType,
         TranscriptionStreamDoneTypedDict,
     )
     from .transcriptionstreamevents import (
@@ -940,17 +891,14 @@
     from .transcriptionstreameventtypes import TranscriptionStreamEventTypes
     from .transcriptionstreamlanguage import (
         TranscriptionStreamLanguage,
-        TranscriptionStreamLanguageType,
         TranscriptionStreamLanguageTypedDict,
     )
     from .transcriptionstreamsegmentdelta import (
         TranscriptionStreamSegmentDelta,
-        TranscriptionStreamSegmentDeltaType,
         TranscriptionStreamSegmentDeltaTypedDict,
     )
     from .transcriptionstreamtextdelta import (
         TranscriptionStreamTextDelta,
-        TranscriptionStreamTextDeltaType,
         TranscriptionStreamTextDeltaTypedDict,
     )
     from .unarchiveftmodelout import (
@@ -965,7 +913,6 @@
         UserMessage,
         UserMessageContent,
         UserMessageContentTypedDict,
-        UserMessageRole,
         UserMessageTypedDict,
     )
     from .validationerror import (
@@ -974,22 +921,13 @@
         ValidationError,
         ValidationErrorTypedDict,
     )
-    from .wandbintegration import (
-        WandbIntegration,
-        WandbIntegrationType,
-        WandbIntegrationTypedDict,
-    )
-    from .wandbintegrationout import (
-        WandbIntegrationOut,
-        WandbIntegrationOutType,
-        WandbIntegrationOutTypedDict,
-    )
+    from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict
+    from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict
     from .websearchpremiumtool import (
         WebSearchPremiumTool,
-        WebSearchPremiumToolType,
         WebSearchPremiumToolTypedDict,
     )
-    from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict
+    from .websearchtool import WebSearchTool, WebSearchToolTypedDict

 __all__ = [
     "APIEndpoint",
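The import hunks above only affect static type checkers: at runtime this __init__.py resolves every public name lazily through the __all__ list and the _dynamic_imports table that the following hunks rewrite, via a PEP 562 module-level __getattr__. A condensed sketch of the pattern, using a toy single-entry table rather than the generated file itself (the generated error handling may differ):

    import importlib
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from .agent import Agent  # seen by type checkers only

    __all__ = ["Agent"]

    _dynamic_imports: dict[str, str] = {"Agent": ".agent"}

    def __getattr__(attr_name: str) -> object:
        # Look up which submodule defines the requested name, import it on
        # first access, and pull the attribute from it.
        module_name = _dynamic_imports.get(attr_name)
        if module_name is None:
            raise AttributeError(f"module {__name__!r} has no attribute {attr_name!r}")
        module = importlib.import_module(module_name, package=__package__)
        return getattr(module, attr_name)

    def __dir__() -> list[str]:
        return sorted(__all__)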
"AgentsAPIV1ConversationsListResponseTypedDict", "AgentsAPIV1ConversationsMessagesRequest", "AgentsAPIV1ConversationsMessagesRequestTypedDict", "AgentsAPIV1ConversationsRestartRequest", @@ -1064,16 +1000,16 @@ "AgentsAPIV1ConversationsRestartStreamRequest", "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", "AgentsCompletionRequest", - "AgentsCompletionRequestMessages", - "AgentsCompletionRequestMessagesTypedDict", + "AgentsCompletionRequestMessage", + "AgentsCompletionRequestMessageTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestToolChoiceTypedDict", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", - "AgentsCompletionStreamRequestMessages", - "AgentsCompletionStreamRequestMessagesTypedDict", + "AgentsCompletionStreamRequestMessage", + "AgentsCompletionStreamRequestMessageTypedDict", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", @@ -1092,7 +1028,6 @@ "Attributes", "AttributesTypedDict", "AudioChunk", - "AudioChunkType", "AudioChunkTypedDict", "AudioEncoding", "AudioFormat", @@ -1102,7 +1037,6 @@ "AudioTranscriptionRequestStreamTypedDict", "AudioTranscriptionRequestTypedDict", "BaseModelCard", - "BaseModelCardType", "BaseModelCardTypedDict", "BatchError", "BatchErrorTypedDict", @@ -1121,24 +1055,33 @@ "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestToolChoiceTypedDict", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", - "ChatCompletionStreamRequestMessages", - "ChatCompletionStreamRequestMessagesTypedDict", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", "ChatModerationRequest", - "ChatModerationRequestInputs", - "ChatModerationRequestInputsTypedDict", + "ChatModerationRequestInputs1", + "ChatModerationRequestInputs1TypedDict", + "ChatModerationRequestInputs2", + "ChatModerationRequestInputs2TypedDict", + "ChatModerationRequestInputs3", + "ChatModerationRequestInputs3TypedDict", "ChatModerationRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", @@ -1151,20 +1094,17 @@ "ClassificationTargetResult", "ClassificationTargetResultTypedDict", "ClassifierDetailedJobOut", - "ClassifierDetailedJobOutIntegrations", - "ClassifierDetailedJobOutIntegrationsTypedDict", - "ClassifierDetailedJobOutJobType", + "ClassifierDetailedJobOutIntegration", + "ClassifierDetailedJobOutIntegrationTypedDict", "ClassifierDetailedJobOutObject", "ClassifierDetailedJobOutStatus", "ClassifierDetailedJobOutTypedDict", "ClassifierFTModelOut", - "ClassifierFTModelOutModelType", "ClassifierFTModelOutObject", "ClassifierFTModelOutTypedDict", "ClassifierJobOut", - "ClassifierJobOutIntegrations", - "ClassifierJobOutIntegrationsTypedDict", - "ClassifierJobOutJobType", + "ClassifierJobOutIntegration", + "ClassifierJobOutIntegrationTypedDict", 
"ClassifierJobOutObject", "ClassifierJobOutStatus", "ClassifierJobOutTypedDict", @@ -1177,7 +1117,6 @@ "ClassifierTrainingParametersInTypedDict", "ClassifierTrainingParametersTypedDict", "CodeInterpreterTool", - "CodeInterpreterToolType", "CodeInterpreterToolTypedDict", "CompletionArgs", "CompletionArgsStop", @@ -1186,12 +1125,11 @@ "CompletionChunk", "CompletionChunkTypedDict", "CompletionDetailedJobOut", - "CompletionDetailedJobOutIntegrations", - "CompletionDetailedJobOutIntegrationsTypedDict", - "CompletionDetailedJobOutJobType", + "CompletionDetailedJobOutIntegration", + "CompletionDetailedJobOutIntegrationTypedDict", "CompletionDetailedJobOutObject", - "CompletionDetailedJobOutRepositories", - "CompletionDetailedJobOutRepositoriesTypedDict", + "CompletionDetailedJobOutRepository", + "CompletionDetailedJobOutRepositoryTypedDict", "CompletionDetailedJobOutStatus", "CompletionDetailedJobOutTypedDict", "CompletionEvent", @@ -1200,7 +1138,12 @@ "CompletionFTModelOutObject", "CompletionFTModelOutTypedDict", "CompletionJobOut", + "CompletionJobOutIntegration", + "CompletionJobOutIntegrationTypedDict", "CompletionJobOutObject", + "CompletionJobOutRepository", + "CompletionJobOutRepositoryTypedDict", + "CompletionJobOutStatus", "CompletionJobOutTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", @@ -1209,10 +1152,8 @@ "CompletionTrainingParametersIn", "CompletionTrainingParametersInTypedDict", "CompletionTrainingParametersTypedDict", - "Content", "ContentChunk", "ContentChunkTypedDict", - "ContentTypedDict", "ConversationAppendRequest", "ConversationAppendRequestHandoffExecution", "ConversationAppendRequestTypedDict", @@ -1232,6 +1173,11 @@ "ConversationMessagesObject", "ConversationMessagesTypedDict", "ConversationRequest", + "ConversationRequestAgentVersion", + "ConversationRequestAgentVersionTypedDict", + "ConversationRequestHandoffExecution", + "ConversationRequestTool", + "ConversationRequestToolTypedDict", "ConversationRequestTypedDict", "ConversationResponse", "ConversationResponseObject", @@ -1250,13 +1196,11 @@ "ConversationStreamRequestAgentVersion", "ConversationStreamRequestAgentVersionTypedDict", "ConversationStreamRequestHandoffExecution", - "ConversationStreamRequestTools", - "ConversationStreamRequestToolsTypedDict", + "ConversationStreamRequestTool", + "ConversationStreamRequestToolTypedDict", "ConversationStreamRequestTypedDict", "ConversationUsageInfo", "ConversationUsageInfoTypedDict", - "Data", - "DataTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", @@ -1264,10 +1208,11 @@ "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", "DeltaMessageTypedDict", "Document", "DocumentLibraryTool", - "DocumentLibraryToolType", "DocumentLibraryToolTypedDict", "DocumentOut", "DocumentOutTypedDict", @@ -1279,6 +1224,8 @@ "DocumentURLChunkTypedDict", "DocumentUpdateIn", "DocumentUpdateInTypedDict", + "DocumentUpload", + "DocumentUploadTypedDict", "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", @@ -1290,8 +1237,8 @@ "EmbeddingResponseTypedDict", "EncodingFormat", "EntityType", - "Entries", - "EntriesTypedDict", + "Entry", + "EntryTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", @@ -1308,7 +1255,6 @@ "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", - "FTModelCardType", "FTModelCardTypedDict", "File", "FileChunk", @@ -1329,10 +1275,7 @@ 
"FilesAPIRoutesListFilesRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", - "FilesAPIRoutesUploadFileMultiPartBodyParams", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModelType", - "FinishReason", "Format", "Function", "FunctionCall", @@ -1343,7 +1286,6 @@ "FunctionCallEntryType", "FunctionCallEntryTypedDict", "FunctionCallEvent", - "FunctionCallEventType", "FunctionCallEventTypedDict", "FunctionCallTypedDict", "FunctionName", @@ -1353,55 +1295,47 @@ "FunctionResultEntryType", "FunctionResultEntryTypedDict", "FunctionTool", - "FunctionToolType", "FunctionToolTypedDict", "FunctionTypedDict", "GithubRepositoryIn", - "GithubRepositoryInType", "GithubRepositoryInTypedDict", "GithubRepositoryOut", - "GithubRepositoryOutType", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", - "HandoffExecution", "Hyperparameters", "HyperparametersTypedDict", "ImageGenerationTool", - "ImageGenerationToolType", "ImageGenerationToolTypedDict", "ImageURL", "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", "InputEntries", "InputEntriesTypedDict", "Inputs", + "InputsMessage", + "InputsMessageTypedDict", "InputsTypedDict", "InstructRequest", "InstructRequestInputs", - "InstructRequestInputsMessages", - "InstructRequestInputsMessagesTypedDict", "InstructRequestInputsTypedDict", - "InstructRequestMessages", - "InstructRequestMessagesTypedDict", + "InstructRequestMessage", + "InstructRequestMessageTypedDict", "InstructRequestTypedDict", - "Integrations", - "IntegrationsTypedDict", "JSONSchema", "JSONSchemaTypedDict", "JobIn", - "JobInIntegrations", - "JobInIntegrationsTypedDict", - "JobInRepositories", - "JobInRepositoriesTypedDict", + "JobInIntegration", + "JobInIntegrationTypedDict", + "JobInRepository", + "JobInRepositoryTypedDict", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", - "JobType", "JobsAPIRoutesBatchCancelBatchJobRequest", "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", "JobsAPIRoutesBatchGetBatchJobRequest", @@ -1422,6 +1356,7 @@ "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobResponse", @@ -1460,8 +1395,6 @@ "LibrariesDocumentsReprocessV1RequestTypedDict", "LibrariesDocumentsUpdateV1Request", "LibrariesDocumentsUpdateV1RequestTypedDict", - "LibrariesDocumentsUploadV1DocumentUpload", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict", "LibrariesDocumentsUploadV1Request", "LibrariesDocumentsUploadV1RequestTypedDict", "LibrariesGetV1Request", @@ -1490,7 +1423,6 @@ "ListSharingOutTypedDict", "Loc", "LocTypedDict", - "Message", "MessageEntries", "MessageEntriesTypedDict", "MessageInputContentChunks", @@ -1498,6 +1430,7 @@ "MessageInputEntry", "MessageInputEntryContent", "MessageInputEntryContentTypedDict", + "MessageInputEntryObject", "MessageInputEntryRole", "MessageInputEntryType", "MessageInputEntryTypedDict", @@ -1514,11 +1447,7 @@ "MessageOutputEventContent", "MessageOutputEventContentTypedDict", "MessageOutputEventRole", - "MessageOutputEventType", "MessageOutputEventTypedDict", - "MessageTypedDict", - 
"Messages", - "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "MistralError", @@ -1527,18 +1456,19 @@ "ModelCapabilitiesTypedDict", "ModelConversation", "ModelConversationObject", - "ModelConversationTools", - "ModelConversationToolsTypedDict", + "ModelConversationTool", + "ModelConversationToolTypedDict", "ModelConversationTypedDict", "ModelList", + "ModelListData", + "ModelListDataTypedDict", "ModelListTypedDict", - "ModelType", "ModerationObject", "ModerationObjectTypedDict", "ModerationResponse", "ModerationResponseTypedDict", - "Name", - "NameTypedDict", + "MultiPartBodyParams", + "MultiPartBodyParamsTypedDict", "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", @@ -1554,24 +1484,20 @@ "OCRTableObjectTypedDict", "OCRUsageInfo", "OCRUsageInfoTypedDict", - "Object", - "One", - "OneTypedDict", + "Output", "OutputContentChunks", "OutputContentChunksTypedDict", - "Outputs", - "OutputsTypedDict", + "OutputTypedDict", "PaginationInfo", "PaginationInfoTypedDict", "Prediction", "PredictionTypedDict", "ProcessingStatusOut", "ProcessingStatusOutTypedDict", - "QueryParamAgentVersion", - "QueryParamAgentVersionTypedDict", - "QueryParamStatus", "RealtimeTranscriptionError", "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionErrorDetailMessage", + "RealtimeTranscriptionErrorDetailMessageTypedDict", "RealtimeTranscriptionErrorDetailTypedDict", "RealtimeTranscriptionErrorTypedDict", "RealtimeTranscriptionSession", @@ -1583,33 +1509,27 @@ "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", - "Repositories", - "RepositoriesTypedDict", "RequestSource", - "Response1", - "Response1TypedDict", - "ResponseBody", - "ResponseBodyTypedDict", + "Response", "ResponseDoneEvent", - "ResponseDoneEventType", "ResponseDoneEventTypedDict", "ResponseErrorEvent", - "ResponseErrorEventType", "ResponseErrorEventTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseRetrieveModelV1ModelsModelIDGet", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", "ResponseStartedEvent", - "ResponseStartedEventType", "ResponseStartedEventTypedDict", + "ResponseTypedDict", + "ResponseV1ConversationsGet", + "ResponseV1ConversationsGetTypedDict", "ResponseValidationError", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", - "Role", "SDKError", "SSETypes", "SampleType", @@ -1623,9 +1543,6 @@ "SharingOut", "SharingOutTypedDict", "Source", - "Status", - "Stop", - "StopTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -1651,21 +1568,20 @@ "ToolExecutionDeltaEvent", "ToolExecutionDeltaEventName", "ToolExecutionDeltaEventNameTypedDict", - "ToolExecutionDeltaEventType", "ToolExecutionDeltaEventTypedDict", "ToolExecutionDoneEvent", "ToolExecutionDoneEventName", "ToolExecutionDoneEventNameTypedDict", - "ToolExecutionDoneEventType", "ToolExecutionDoneEventTypedDict", "ToolExecutionEntry", + "ToolExecutionEntryName", + "ToolExecutionEntryNameTypedDict", "ToolExecutionEntryObject", "ToolExecutionEntryType", "ToolExecutionEntryTypedDict", "ToolExecutionStartedEvent", "ToolExecutionStartedEventName", "ToolExecutionStartedEventNameTypedDict", - "ToolExecutionStartedEventType", "ToolExecutionStartedEventTypedDict", "ToolFileChunk", "ToolFileChunkTool", @@ -1675,7 +1591,6 @@ "ToolMessage", 
"ToolMessageContent", "ToolMessageContentTypedDict", - "ToolMessageRole", "ToolMessageTypedDict", "ToolReferenceChunk", "ToolReferenceChunkTool", @@ -1684,16 +1599,14 @@ "ToolReferenceChunkTypedDict", "ToolTypedDict", "ToolTypes", - "Tools", - "ToolsTypedDict", "TrainingFile", "TrainingFileTypedDict", "TranscriptionResponse", "TranscriptionResponseTypedDict", "TranscriptionSegmentChunk", + "TranscriptionSegmentChunkType", "TranscriptionSegmentChunkTypedDict", "TranscriptionStreamDone", - "TranscriptionStreamDoneType", "TranscriptionStreamDoneTypedDict", "TranscriptionStreamEventTypes", "TranscriptionStreamEvents", @@ -1701,17 +1614,11 @@ "TranscriptionStreamEventsDataTypedDict", "TranscriptionStreamEventsTypedDict", "TranscriptionStreamLanguage", - "TranscriptionStreamLanguageType", "TranscriptionStreamLanguageTypedDict", "TranscriptionStreamSegmentDelta", - "TranscriptionStreamSegmentDeltaType", "TranscriptionStreamSegmentDeltaTypedDict", "TranscriptionStreamTextDelta", - "TranscriptionStreamTextDeltaType", "TranscriptionStreamTextDeltaTypedDict", - "Two", - "TwoTypedDict", - "Type", "UnarchiveFTModelOut", "UnarchiveFTModelOutObject", "UnarchiveFTModelOutTypedDict", @@ -1724,29 +1631,24 @@ "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", - "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", - "WandbIntegrationOutType", "WandbIntegrationOutTypedDict", - "WandbIntegrationType", "WandbIntegrationTypedDict", "WebSearchPremiumTool", - "WebSearchPremiumToolType", "WebSearchPremiumToolTypedDict", "WebSearchTool", - "WebSearchToolType", "WebSearchToolTypedDict", ] _dynamic_imports: dict[str, str] = { "Agent": ".agent", "AgentObject": ".agent", - "AgentTools": ".agent", - "AgentToolsTypedDict": ".agent", + "AgentTool": ".agent", + "AgentToolTypedDict": ".agent", "AgentTypedDict": ".agent", "AgentAliasResponse": ".agentaliasresponse", "AgentAliasResponseTypedDict": ".agentaliasresponse", @@ -1756,18 +1658,16 @@ "AgentConversationObject": ".agentconversation", "AgentConversationTypedDict": ".agentconversation", "AgentCreationRequest": ".agentcreationrequest", - "AgentCreationRequestTools": ".agentcreationrequest", - "AgentCreationRequestToolsTypedDict": ".agentcreationrequest", + "AgentCreationRequestTool": ".agentcreationrequest", + "AgentCreationRequestToolTypedDict": ".agentcreationrequest", "AgentCreationRequestTypedDict": ".agentcreationrequest", "AgentHandoffDoneEvent": ".agenthandoffdoneevent", - "AgentHandoffDoneEventType": ".agenthandoffdoneevent", "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", "AgentHandoffEntry": ".agenthandoffentry", "AgentHandoffEntryObject": ".agenthandoffentry", "AgentHandoffEntryType": ".agenthandoffentry", "AgentHandoffEntryTypedDict": ".agenthandoffentry", "AgentHandoffStartedEvent": ".agenthandoffstartedevent", - "AgentHandoffStartedEventType": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", @@ -1775,10 +1675,10 @@ "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetAgentVersion": 
".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetAgentVersionTypedDict": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", - "QueryParamAgentVersion": ".agents_api_v1_agents_getop", - "QueryParamAgentVersionTypedDict": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", @@ -1797,14 +1697,14 @@ "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", - "ResponseBody": ".agents_api_v1_conversations_listop", - "ResponseBodyTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponse": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponseTypedDict": ".agents_api_v1_conversations_listop", "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", @@ -1812,24 +1712,24 @@ "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", "AgentsCompletionRequest": ".agentscompletionrequest", - "AgentsCompletionRequestMessages": ".agentscompletionrequest", - "AgentsCompletionRequestMessagesTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestMessage": ".agentscompletionrequest", + "AgentsCompletionRequestMessageTypedDict": ".agentscompletionrequest", "AgentsCompletionRequestStop": ".agentscompletionrequest", "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessages": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessagesTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessage": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessageTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", 
"AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", "AgentUpdateRequest": ".agentupdaterequest", - "AgentUpdateRequestTools": ".agentupdaterequest", - "AgentUpdateRequestToolsTypedDict": ".agentupdaterequest", + "AgentUpdateRequestTool": ".agentupdaterequest", + "AgentUpdateRequestToolTypedDict": ".agentupdaterequest", "AgentUpdateRequestTypedDict": ".agentupdaterequest", "APIEndpoint": ".apiendpoint", "ArchiveFTModelOut": ".archiveftmodelout", @@ -1841,7 +1741,6 @@ "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", "AudioChunk": ".audiochunk", - "AudioChunkType": ".audiochunk", "AudioChunkTypedDict": ".audiochunk", "AudioEncoding": ".audioencoding", "AudioFormat": ".audioformat", @@ -1851,7 +1750,6 @@ "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", "BaseModelCard": ".basemodelcard", - "BaseModelCardType": ".basemodelcard", "BaseModelCardTypedDict": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", @@ -1870,34 +1768,34 @@ "ChatClassificationRequest": ".chatclassificationrequest", "ChatClassificationRequestTypedDict": ".chatclassificationrequest", "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", - "FinishReason": ".chatcompletionchoice", "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", "ChatCompletionRequestToolChoice": ".chatcompletionrequest", "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", "ChatCompletionRequestTypedDict": ".chatcompletionrequest", - "Messages": ".chatcompletionrequest", - "MessagesTypedDict": ".chatcompletionrequest", - "Stop": ".chatcompletionrequest", - "StopTypedDict": ".chatcompletionrequest", "ChatCompletionResponse": ".chatcompletionresponse", "ChatCompletionResponseTypedDict": ".chatcompletionresponse", "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessages": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessagesTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", "ChatModerationRequest": ".chatmoderationrequest", - "ChatModerationRequestInputs": ".chatmoderationrequest", - "ChatModerationRequestInputsTypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs1": ".chatmoderationrequest", + "ChatModerationRequestInputs1TypedDict": ".chatmoderationrequest", + 
"ChatModerationRequestInputs2": ".chatmoderationrequest", + "ChatModerationRequestInputs2TypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs3": ".chatmoderationrequest", + "ChatModerationRequestInputs3TypedDict": ".chatmoderationrequest", "ChatModerationRequestTypedDict": ".chatmoderationrequest", - "One": ".chatmoderationrequest", - "OneTypedDict": ".chatmoderationrequest", - "Two": ".chatmoderationrequest", - "TwoTypedDict": ".chatmoderationrequest", "CheckpointOut": ".checkpointout", "CheckpointOutTypedDict": ".checkpointout", "ClassificationRequest": ".classificationrequest", @@ -1909,20 +1807,17 @@ "ClassificationTargetResult": ".classificationtargetresult", "ClassificationTargetResultTypedDict": ".classificationtargetresult", "ClassifierDetailedJobOut": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrations": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrationsTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutJobType": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegration": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegrationTypedDict": ".classifierdetailedjobout", "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutModelType": ".classifierftmodelout", "ClassifierFTModelOutObject": ".classifierftmodelout", "ClassifierFTModelOutTypedDict": ".classifierftmodelout", "ClassifierJobOut": ".classifierjobout", - "ClassifierJobOutIntegrations": ".classifierjobout", - "ClassifierJobOutIntegrationsTypedDict": ".classifierjobout", - "ClassifierJobOutJobType": ".classifierjobout", + "ClassifierJobOutIntegration": ".classifierjobout", + "ClassifierJobOutIntegrationTypedDict": ".classifierjobout", "ClassifierJobOutObject": ".classifierjobout", "ClassifierJobOutStatus": ".classifierjobout", "ClassifierJobOutTypedDict": ".classifierjobout", @@ -1935,7 +1830,6 @@ "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", "CodeInterpreterTool": ".codeinterpretertool", - "CodeInterpreterToolType": ".codeinterpretertool", "CodeInterpreterToolTypedDict": ".codeinterpretertool", "CompletionArgs": ".completionargs", "CompletionArgsTypedDict": ".completionargs", @@ -1944,12 +1838,11 @@ "CompletionChunk": ".completionchunk", "CompletionChunkTypedDict": ".completionchunk", "CompletionDetailedJobOut": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrations": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrationsTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutJobType": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegration": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegrationTypedDict": ".completiondetailedjobout", "CompletionDetailedJobOutObject": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositories": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositoriesTypedDict": ".completiondetailedjobout", + "CompletionDetailedJobOutRepository": ".completiondetailedjobout", + "CompletionDetailedJobOutRepositoryTypedDict": ".completiondetailedjobout", "CompletionDetailedJobOutStatus": ".completiondetailedjobout", "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", "CompletionEvent": ".completionevent", @@ -1957,16 
+1850,14 @@ "CompletionFTModelOut": ".completionftmodelout", "CompletionFTModelOutObject": ".completionftmodelout", "CompletionFTModelOutTypedDict": ".completionftmodelout", - "ModelType": ".completionftmodelout", "CompletionJobOut": ".completionjobout", + "CompletionJobOutIntegration": ".completionjobout", + "CompletionJobOutIntegrationTypedDict": ".completionjobout", "CompletionJobOutObject": ".completionjobout", + "CompletionJobOutRepository": ".completionjobout", + "CompletionJobOutRepositoryTypedDict": ".completionjobout", + "CompletionJobOutStatus": ".completionjobout", "CompletionJobOutTypedDict": ".completionjobout", - "Integrations": ".completionjobout", - "IntegrationsTypedDict": ".completionjobout", - "JobType": ".completionjobout", - "Repositories": ".completionjobout", - "RepositoriesTypedDict": ".completionjobout", - "Status": ".completionjobout", "CompletionResponseStreamChoice": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", @@ -1989,25 +1880,25 @@ "ConversationHistory": ".conversationhistory", "ConversationHistoryObject": ".conversationhistory", "ConversationHistoryTypedDict": ".conversationhistory", - "Entries": ".conversationhistory", - "EntriesTypedDict": ".conversationhistory", + "Entry": ".conversationhistory", + "EntryTypedDict": ".conversationhistory", "ConversationInputs": ".conversationinputs", "ConversationInputsTypedDict": ".conversationinputs", "ConversationMessages": ".conversationmessages", "ConversationMessagesObject": ".conversationmessages", "ConversationMessagesTypedDict": ".conversationmessages", - "AgentVersion": ".conversationrequest", - "AgentVersionTypedDict": ".conversationrequest", "ConversationRequest": ".conversationrequest", + "ConversationRequestAgentVersion": ".conversationrequest", + "ConversationRequestAgentVersionTypedDict": ".conversationrequest", + "ConversationRequestHandoffExecution": ".conversationrequest", + "ConversationRequestTool": ".conversationrequest", + "ConversationRequestToolTypedDict": ".conversationrequest", "ConversationRequestTypedDict": ".conversationrequest", - "HandoffExecution": ".conversationrequest", - "Tools": ".conversationrequest", - "ToolsTypedDict": ".conversationrequest", "ConversationResponse": ".conversationresponse", "ConversationResponseObject": ".conversationresponse", "ConversationResponseTypedDict": ".conversationresponse", - "Outputs": ".conversationresponse", - "OutputsTypedDict": ".conversationresponse", + "Output": ".conversationresponse", + "OutputTypedDict": ".conversationresponse", "ConversationRestartRequest": ".conversationrestartrequest", "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", @@ -2022,8 +1913,8 @@ "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", - "ConversationStreamRequestTools": ".conversationstreamrequest", - "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestTool": ".conversationstreamrequest", + "ConversationStreamRequestToolTypedDict": ".conversationstreamrequest", "ConversationStreamRequestTypedDict": ".conversationstreamrequest", "ConversationUsageInfo": ".conversationusageinfo", 
"ConversationUsageInfoTypedDict": ".conversationusageinfo", @@ -2033,12 +1924,11 @@ "DeleteFileOutTypedDict": ".deletefileout", "DeleteModelOut": ".deletemodelout", "DeleteModelOutTypedDict": ".deletemodelout", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", "DeltaMessageTypedDict": ".deltamessage", "DocumentLibraryTool": ".documentlibrarytool", - "DocumentLibraryToolType": ".documentlibrarytool", "DocumentLibraryToolTypedDict": ".documentlibrarytool", "DocumentOut": ".documentout", "DocumentOutTypedDict": ".documentout", @@ -2079,8 +1969,8 @@ "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", + "MultiPartBodyParams": ".files_api_routes_upload_fileop", + "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", "FileSchema": ".fileschema", "FileSchemaTypedDict": ".fileschema", "FileSignedURL": ".filesignedurl", @@ -2100,7 +1990,6 @@ "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", "FTModelCard": ".ftmodelcard", - "FTModelCardType": ".ftmodelcard", "FTModelCardTypedDict": ".ftmodelcard", "Function": ".function", "FunctionTypedDict": ".function", @@ -2115,7 +2004,6 @@ "FunctionCallEntryArguments": ".functioncallentryarguments", "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", "FunctionCallEvent": ".functioncallevent", - "FunctionCallEventType": ".functioncallevent", "FunctionCallEventTypedDict": ".functioncallevent", "FunctionName": ".functionname", "FunctionNameTypedDict": ".functionname", @@ -2124,45 +2012,41 @@ "FunctionResultEntryType": ".functionresultentry", "FunctionResultEntryTypedDict": ".functionresultentry", "FunctionTool": ".functiontool", - "FunctionToolType": ".functiontool", "FunctionToolTypedDict": ".functiontool", "GithubRepositoryIn": ".githubrepositoryin", - "GithubRepositoryInType": ".githubrepositoryin", "GithubRepositoryInTypedDict": ".githubrepositoryin", "GithubRepositoryOut": ".githubrepositoryout", - "GithubRepositoryOutType": ".githubrepositoryout", "GithubRepositoryOutTypedDict": ".githubrepositoryout", "HTTPValidationError": ".httpvalidationerror", "HTTPValidationErrorData": ".httpvalidationerror", "ImageGenerationTool": ".imagegenerationtool", - "ImageGenerationToolType": ".imagegenerationtool", "ImageGenerationToolTypedDict": ".imagegenerationtool", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", "ImageURLChunkType": ".imageurlchunk", "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", "InputEntries": ".inputentries", "InputEntriesTypedDict": ".inputentries", "Inputs": ".inputs", + "InputsMessage": ".inputs", + "InputsMessageTypedDict": ".inputs", "InputsTypedDict": ".inputs", "InstructRequestInputs": ".inputs", - "InstructRequestInputsMessages": ".inputs", - "InstructRequestInputsMessagesTypedDict": ".inputs", "InstructRequestInputsTypedDict": 
".inputs", "InstructRequest": ".instructrequest", - "InstructRequestMessages": ".instructrequest", - "InstructRequestMessagesTypedDict": ".instructrequest", + "InstructRequestMessage": ".instructrequest", + "InstructRequestMessageTypedDict": ".instructrequest", "InstructRequestTypedDict": ".instructrequest", "Hyperparameters": ".jobin", "HyperparametersTypedDict": ".jobin", "JobIn": ".jobin", - "JobInIntegrations": ".jobin", - "JobInIntegrationsTypedDict": ".jobin", - "JobInRepositories": ".jobin", - "JobInRepositoriesTypedDict": ".jobin", + "JobInIntegration": ".jobin", + "JobInIntegrationTypedDict": ".jobin", + "JobInRepository": ".jobin", + "JobInRepositoryTypedDict": ".jobin", "JobInTypedDict": ".jobin", "JobMetadataOut": ".jobmetadataout", "JobMetadataOutTypedDict": ".jobmetadataout", @@ -2180,15 +2064,15 @@ "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1TypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "ResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "QueryParamStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", @@ -2229,8 +2113,8 @@ "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", - "LibrariesDocumentsUploadV1DocumentUpload": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + "DocumentUpload": ".libraries_documents_upload_v1op", + "DocumentUploadTypedDict": ".libraries_documents_upload_v1op", "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", "LibrariesGetV1Request": ".libraries_get_v1op", @@ -2264,10 +2148,10 @@ "MessageInputEntry": ".messageinputentry", "MessageInputEntryContent": ".messageinputentry", 
"MessageInputEntryContentTypedDict": ".messageinputentry", + "MessageInputEntryObject": ".messageinputentry", "MessageInputEntryRole": ".messageinputentry", "MessageInputEntryType": ".messageinputentry", "MessageInputEntryTypedDict": ".messageinputentry", - "Object": ".messageinputentry", "MessageOutputContentChunks": ".messageoutputcontentchunks", "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", "MessageOutputEntry": ".messageoutputentry", @@ -2281,7 +2165,6 @@ "MessageOutputEventContent": ".messageoutputevent", "MessageOutputEventContentTypedDict": ".messageoutputevent", "MessageOutputEventRole": ".messageoutputevent", - "MessageOutputEventType": ".messageoutputevent", "MessageOutputEventTypedDict": ".messageoutputevent", "MetricOut": ".metricout", "MetricOutTypedDict": ".metricout", @@ -2290,12 +2173,12 @@ "ModelCapabilitiesTypedDict": ".modelcapabilities", "ModelConversation": ".modelconversation", "ModelConversationObject": ".modelconversation", - "ModelConversationTools": ".modelconversation", - "ModelConversationToolsTypedDict": ".modelconversation", + "ModelConversationTool": ".modelconversation", + "ModelConversationToolTypedDict": ".modelconversation", "ModelConversationTypedDict": ".modelconversation", - "Data": ".modellist", - "DataTypedDict": ".modellist", "ModelList": ".modellist", + "ModelListData": ".modellist", + "ModelListDataTypedDict": ".modellist", "ModelListTypedDict": ".modellist", "ModerationObject": ".moderationobject", "ModerationObjectTypedDict": ".moderationobject", @@ -2330,9 +2213,9 @@ "ProcessingStatusOutTypedDict": ".processingstatusout", "RealtimeTranscriptionError": ".realtimetranscriptionerror", "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", - "Message": ".realtimetranscriptionerrordetail", - "MessageTypedDict": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailMessage": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailMessageTypedDict": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionSession": ".realtimetranscriptionsession", "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", @@ -2345,22 +2228,19 @@ "ReferenceChunkTypedDict": ".referencechunk", "RequestSource": ".requestsource", "ResponseDoneEvent": ".responsedoneevent", - "ResponseDoneEventType": ".responsedoneevent", "ResponseDoneEventTypedDict": ".responsedoneevent", "ResponseErrorEvent": ".responseerrorevent", - "ResponseErrorEventType": ".responseerrorevent", "ResponseErrorEventTypedDict": ".responseerrorevent", "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", "ResponseFormats": ".responseformats", "ResponseStartedEvent": ".responsestartedevent", - "ResponseStartedEventType": ".responsestartedevent", "ResponseStartedEventTypedDict": ".responsestartedevent", "ResponseValidationError": ".responsevalidationerror", + "ResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", - 
"RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", "RetrieveFileOut": ".retrievefileout", "RetrieveFileOutTypedDict": ".retrievefileout", "SampleType": ".sampletype", @@ -2376,7 +2256,6 @@ "SharingOutTypedDict": ".sharingout", "Source": ".source", "SSETypes": ".ssetypes", - "Role": ".systemmessage", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", @@ -2402,23 +2281,20 @@ "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", "ToolExecutionDoneEvent": ".toolexecutiondoneevent", "ToolExecutionDoneEventName": ".toolexecutiondoneevent", "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", - "ToolExecutionDoneEventType": ".toolexecutiondoneevent", "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", - "Name": ".toolexecutionentry", - "NameTypedDict": ".toolexecutionentry", "ToolExecutionEntry": ".toolexecutionentry", + "ToolExecutionEntryName": ".toolexecutionentry", + "ToolExecutionEntryNameTypedDict": ".toolexecutionentry", "ToolExecutionEntryObject": ".toolexecutionentry", "ToolExecutionEntryType": ".toolexecutionentry", "ToolExecutionEntryTypedDict": ".toolexecutionentry", "ToolExecutionStartedEvent": ".toolexecutionstartedevent", "ToolExecutionStartedEventName": ".toolexecutionstartedevent", "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", - "ToolExecutionStartedEventType": ".toolexecutionstartedevent", "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", "ToolFileChunk": ".toolfilechunk", "ToolFileChunkTool": ".toolfilechunk", @@ -2428,7 +2304,6 @@ "ToolMessage": ".toolmessage", "ToolMessageContent": ".toolmessage", "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", "ToolMessageTypedDict": ".toolmessage", "ToolReferenceChunk": ".toolreferencechunk", "ToolReferenceChunkTool": ".toolreferencechunk", @@ -2441,10 +2316,9 @@ "TranscriptionResponse": ".transcriptionresponse", "TranscriptionResponseTypedDict": ".transcriptionresponse", "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", + "TranscriptionSegmentChunkType": ".transcriptionsegmentchunk", "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", - "Type": ".transcriptionsegmentchunk", "TranscriptionStreamDone": ".transcriptionstreamdone", - "TranscriptionStreamDoneType": ".transcriptionstreamdone", "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", "TranscriptionStreamEvents": ".transcriptionstreamevents", "TranscriptionStreamEventsData": ".transcriptionstreamevents", @@ -2452,13 +2326,10 @@ "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", - "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", - 
"TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", "UnarchiveFTModelOut": ".unarchiveftmodelout", "UnarchiveFTModelOutObject": ".unarchiveftmodelout", @@ -2472,23 +2343,18 @@ "UserMessage": ".usermessage", "UserMessageContent": ".usermessage", "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", "UserMessageTypedDict": ".usermessage", "Loc": ".validationerror", "LocTypedDict": ".validationerror", "ValidationError": ".validationerror", "ValidationErrorTypedDict": ".validationerror", "WandbIntegration": ".wandbintegration", - "WandbIntegrationType": ".wandbintegration", "WandbIntegrationTypedDict": ".wandbintegration", "WandbIntegrationOut": ".wandbintegrationout", - "WandbIntegrationOutType": ".wandbintegrationout", "WandbIntegrationOutTypedDict": ".wandbintegrationout", "WebSearchPremiumTool": ".websearchpremiumtool", - "WebSearchPremiumToolType": ".websearchpremiumtool", "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", "WebSearchTool": ".websearchtool", - "WebSearchToolType": ".websearchtool", "WebSearchToolTypedDict": ".websearchtool", } diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py index 3bedb3a3..b2fe3939 100644 --- a/src/mistralai/client/models/agent.py +++ b/src/mistralai/client/models/agent.py @@ -16,14 +16,13 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentToolsTypedDict = TypeAliasType( - "AgentToolsTypedDict", +AgentToolTypedDict = TypeAliasType( + "AgentToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -35,16 +34,16 @@ ) -AgentTools = Annotated[ +AgentTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] @@ -63,7 +62,7 @@ class AgentTypedDict(TypedDict): source: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentToolsTypedDict]] + tools: NotRequired[List[AgentToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -95,7 +94,7 @@ class Agent(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentTools]] = None + tools: Optional[List[AgentTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None diff --git a/src/mistralai/client/models/agentcreationrequest.py b/src/mistralai/client/models/agentcreationrequest.py index 61a5aff5..561bef64 100644 --- 
a/src/mistralai/client/models/agentcreationrequest.py +++ b/src/mistralai/client/models/agentcreationrequest.py @@ -15,14 +15,13 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentCreationRequestToolsTypedDict = TypeAliasType( - "AgentCreationRequestToolsTypedDict", +AgentCreationRequestToolTypedDict = TypeAliasType( + "AgentCreationRequestToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -34,16 +33,16 @@ ) -AgentCreationRequestTools = Annotated[ +AgentCreationRequestTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] @@ -52,7 +51,7 @@ class AgentCreationRequestTypedDict(TypedDict): name: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentCreationRequestToolsTypedDict]] + tools: NotRequired[List[AgentCreationRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -69,7 +68,7 @@ class AgentCreationRequest(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentCreationRequestTools]] = None + tools: Optional[List[AgentCreationRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py index c826aa5e..40bb446b 100644 --- a/src/mistralai/client/models/agenthandoffdoneevent.py +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -3,18 +3,18 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffDoneEventType = Literal["agent.handoff.done",] +from typing_extensions import Annotated, NotRequired, TypedDict class AgentHandoffDoneEventTypedDict(TypedDict): id: str next_agent_id: str next_agent_name: str - type: NotRequired[AgentHandoffDoneEventType] + type: Literal["agent.handoff.done"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -26,7 +26,13 @@ class AgentHandoffDoneEvent(BaseModel): next_agent_name: str - type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done" + TYPE: Annotated[ + Annotated[ + Literal["agent.handoff.done"], + AfterValidator(validate_const("agent.handoff.done")), + ], + 
pydantic.Field(alias="type"), + ] = "agent.handoff.done" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py index 4b8ff1e5..93f56db0 100644 --- a/src/mistralai/client/models/agenthandoffstartedevent.py +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -3,18 +3,18 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffStartedEventType = Literal["agent.handoff.started",] +from typing_extensions import Annotated, NotRequired, TypedDict class AgentHandoffStartedEventTypedDict(TypedDict): id: str previous_agent_id: str previous_agent_name: str - type: NotRequired[AgentHandoffStartedEventType] + type: Literal["agent.handoff.started"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -26,7 +26,13 @@ class AgentHandoffStartedEvent(BaseModel): previous_agent_name: str - type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started" + TYPE: Annotated[ + Annotated[ + Literal["agent.handoff.started"], + AfterValidator(validate_const("agent.handoff.started")), + ], + pydantic.Field(alias="type"), + ] = "agent.handoff.started" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/agents_api_v1_agents_getop.py index d4817457..57abff76 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_getop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_getop.py @@ -14,17 +14,19 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -QueryParamAgentVersionTypedDict = TypeAliasType( - "QueryParamAgentVersionTypedDict", Union[int, str] +AgentsAPIV1AgentsGetAgentVersionTypedDict = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersionTypedDict", Union[int, str] ) -QueryParamAgentVersion = TypeAliasType("QueryParamAgentVersion", Union[int, str]) +AgentsAPIV1AgentsGetAgentVersion = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersion", Union[int, str] +) class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): agent_id: str - agent_version: NotRequired[Nullable[QueryParamAgentVersionTypedDict]] + agent_version: NotRequired[Nullable[AgentsAPIV1AgentsGetAgentVersionTypedDict]] class AgentsAPIV1AgentsGetRequest(BaseModel): @@ -33,7 +35,7 @@ class AgentsAPIV1AgentsGetRequest(BaseModel): ] agent_version: Annotated[ - OptionalNullable[QueryParamAgentVersion], + OptionalNullable[AgentsAPIV1AgentsGetAgentVersion], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/client/models/agents_api_v1_conversations_getop.py b/src/mistralai/client/models/agents_api_v1_conversations_getop.py index c919f99e..7308708e 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_getop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_getop.py @@ -21,15 +21,14 @@ class AgentsAPIV1ConversationsGetRequest(BaseModel): r"""ID of the conversation from which we are fetching metadata.""" -AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", +ResponseV1ConversationsGetTypedDict = TypeAliasType( + 
"ResponseV1ConversationsGetTypedDict", Union[AgentConversationTypedDict, ModelConversationTypedDict], ) r"""Successful Response""" -AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType( - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", - Union[AgentConversation, ModelConversation], +ResponseV1ConversationsGet = TypeAliasType( + "ResponseV1ConversationsGet", Union[AgentConversation, ModelConversation] ) r"""Successful Response""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_listop.py b/src/mistralai/client/models/agents_api_v1_conversations_listop.py index bb3c7127..aae9c74e 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_listop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_listop.py @@ -69,12 +69,12 @@ def serialize_model(self, handler): return m -ResponseBodyTypedDict = TypeAliasType( - "ResponseBodyTypedDict", +AgentsAPIV1ConversationsListResponseTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsListResponseTypedDict", Union[AgentConversationTypedDict, ModelConversationTypedDict], ) -ResponseBody = TypeAliasType( - "ResponseBody", Union[AgentConversation, ModelConversation] +AgentsAPIV1ConversationsListResponse = TypeAliasType( + "AgentsAPIV1ConversationsListResponse", Union[AgentConversation, ModelConversation] ) diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py index 22368e44..3b045ed6 100644 --- a/src/mistralai/client/models/agentscompletionrequest.py +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -AgentsCompletionRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionRequestMessagesTypedDict", +AgentsCompletionRequestMessageTypedDict = TypeAliasType( + "AgentsCompletionRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -AgentsCompletionRequestMessages = Annotated[ +AgentsCompletionRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -70,7 +70,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): - messages: List[AgentsCompletionRequestMessagesTypedDict] + messages: List[AgentsCompletionRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" agent_id: str r"""The ID of the agent to use for this completion.""" @@ -101,7 +101,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): class AgentsCompletionRequest(BaseModel): - messages: List[AgentsCompletionRequestMessages] + messages: List[AgentsCompletionRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" agent_id: str diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py index 37d46c79..23920c4e 100644 --- a/src/mistralai/client/models/agentscompletionstreamrequest.py +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -AgentsCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestMessagesTypedDict", +AgentsCompletionStreamRequestMessageTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -AgentsCompletionStreamRequestMessages = Annotated[ +AgentsCompletionStreamRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -70,7 +70,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): - messages: List[AgentsCompletionStreamRequestMessagesTypedDict] + messages: List[AgentsCompletionStreamRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" agent_id: str r"""The ID of the agent to use for this completion.""" @@ -100,7 +100,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): class AgentsCompletionStreamRequest(BaseModel): - messages: List[AgentsCompletionStreamRequestMessages] + messages: List[AgentsCompletionStreamRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" agent_id: str diff --git a/src/mistralai/client/models/agentupdaterequest.py b/src/mistralai/client/models/agentupdaterequest.py index 261ac069..be93157d 100644 --- a/src/mistralai/client/models/agentupdaterequest.py +++ b/src/mistralai/client/models/agentupdaterequest.py @@ -15,14 +15,13 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentUpdateRequestToolsTypedDict = TypeAliasType( - "AgentUpdateRequestToolsTypedDict", +AgentUpdateRequestToolTypedDict = TypeAliasType( + "AgentUpdateRequestToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -34,23 +33,23 @@ ) -AgentUpdateRequestTools = Annotated[ +AgentUpdateRequestTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] class AgentUpdateRequestTypedDict(TypedDict): instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] + tools: NotRequired[List[AgentUpdateRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -66,7 +65,7 @@ class AgentUpdateRequest(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentUpdateRequestTools]] = None + tools: Optional[List[AgentUpdateRequestTool]] = None r"""List of tools 
which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py index 80d836f2..fae1193c 100644 --- a/src/mistralai/client/models/audiochunk.py +++ b/src/mistralai/client/models/audiochunk.py @@ -2,19 +2,24 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AudioChunkType = Literal["input_audio",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class AudioChunkTypedDict(TypedDict): input_audio: str - type: NotRequired[AudioChunkType] + type: Literal["input_audio"] class AudioChunk(BaseModel): input_audio: str - type: Optional[AudioChunkType] = "input_audio" + TYPE: Annotated[ + Annotated[ + Literal["input_audio"], AfterValidator(validate_const("input_audio")) + ], + pydantic.Field(alias="type"), + ] = "input_audio" diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py index 8ce7f139..f16607d5 100644 --- a/src/mistralai/client/models/basemodelcard.py +++ b/src/mistralai/client/models/basemodelcard.py @@ -18,9 +18,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -BaseModelCardType = Literal["base",] - - class BaseModelCardTypedDict(TypedDict): id: str capabilities: ModelCapabilitiesTypedDict @@ -34,7 +31,7 @@ class BaseModelCardTypedDict(TypedDict): deprecation: NotRequired[Nullable[datetime]] deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: BaseModelCardType + type: Literal["base"] class BaseModelCard(BaseModel): @@ -63,7 +60,7 @@ class BaseModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], + Annotated[Literal["base"], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" @@ -80,7 +77,6 @@ def serialize_model(self, handler): "deprecation", "deprecation_replacement_model", "default_model_temperature", - "type", ] nullable_fields = [ "name", diff --git a/src/mistralai/client/models/batchjobstatus.py b/src/mistralai/client/models/batchjobstatus.py index 4b28059b..1ba3dd55 100644 --- a/src/mistralai/client/models/batchjobstatus.py +++ b/src/mistralai/client/models/batchjobstatus.py @@ -1,15 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -BatchJobStatus = Literal[ - "QUEUED", - "RUNNING", - "SUCCESS", - "FAILED", - "TIMEOUT_EXCEEDED", - "CANCELLATION_REQUESTED", - "CANCELLED", +BatchJobStatus = Union[ + Literal[ + "QUEUED", + "RUNNING", + "SUCCESS", + "FAILED", + "TIMEOUT_EXCEEDED", + "CANCELLATION_REQUESTED", + "CANCELLED", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/builtinconnectors.py b/src/mistralai/client/models/builtinconnectors.py index 6a3b2476..4a98b45b 100644 --- a/src/mistralai/client/models/builtinconnectors.py +++ b/src/mistralai/client/models/builtinconnectors.py @@ -1,13 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -BuiltInConnectors = Literal[ - "web_search", - "web_search_premium", - "code_interpreter", - "image_generation", - "document_library", +BuiltInConnectors = Union[ + Literal[ + "web_search", + "web_search_premium", + "code_interpreter", + "image_generation", + "document_library", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/chatcompletionchoice.py b/src/mistralai/client/models/chatcompletionchoice.py index 5d888cfd..5752f7c1 100644 --- a/src/mistralai/client/models/chatcompletionchoice.py +++ b/src/mistralai/client/models/chatcompletionchoice.py @@ -7,7 +7,7 @@ from typing_extensions import TypedDict -FinishReason = Union[ +ChatCompletionChoiceFinishReason = Union[ Literal[ "stop", "length", @@ -22,7 +22,7 @@ class ChatCompletionChoiceTypedDict(TypedDict): index: int message: AssistantMessageTypedDict - finish_reason: FinishReason + finish_reason: ChatCompletionChoiceFinishReason class ChatCompletionChoice(BaseModel): @@ -30,4 +30,4 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: FinishReason + finish_reason: ChatCompletionChoiceFinishReason diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py index 30fce28d..62c375e0 100644 --- a/src/mistralai/client/models/chatcompletionrequest.py +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -24,16 +24,20 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -Stop = TypeAliasType("Stop", Union[str, List[str]]) +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected.
Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -43,7 +47,7 @@ ) -Messages = Annotated[ +ChatCompletionRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -70,7 +74,7 @@ class ChatCompletionRequestTypedDict(TypedDict): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[MessagesTypedDict] + messages: List[ChatCompletionRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -80,7 +84,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[StopTypedDict] + stop: NotRequired[ChatCompletionRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" @@ -111,7 +115,7 @@ class ChatCompletionRequest(BaseModel): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[Messages] + messages: List[ChatCompletionRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: OptionalNullable[float] = UNSET @@ -126,7 +130,7 @@ class ChatCompletionRequest(BaseModel): stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: Optional[Stop] = None + stop: Optional[ChatCompletionRequestStop] = None r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" random_seed: OptionalNullable[int] = UNSET diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py index 21dad38b..4e5c281d 100644 --- a/src/mistralai/client/models/chatcompletionstreamrequest.py +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionStreamRequestMessagesTypedDict", +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -ChatCompletionStreamRequestMessages = Annotated[ +ChatCompletionStreamRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -74,7 +74,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionStreamRequestMessagesTypedDict] + messages: List[ChatCompletionStreamRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -114,7 +114,7 @@ class ChatCompletionStreamRequest(BaseModel): model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionStreamRequestMessages] + messages: List[ChatCompletionStreamRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: OptionalNullable[float] = UNSET diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py index 631c914d..4e2611c8 100644 --- a/src/mistralai/client/models/chatmoderationrequest.py +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -13,8 +13,8 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -TwoTypedDict = TypeAliasType( - "TwoTypedDict", +ChatModerationRequestInputs2TypedDict = TypeAliasType( + "ChatModerationRequestInputs2TypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -24,7 +24,7 @@ ) -Two = Annotated[ +ChatModerationRequestInputs2 = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -35,8 +35,8 @@ ] -OneTypedDict = TypeAliasType( - "OneTypedDict", +ChatModerationRequestInputs1TypedDict = TypeAliasType( + "ChatModerationRequestInputs1TypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -46,7 +46,7 @@ ) -One = Annotated[ +ChatModerationRequestInputs1 = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -57,27 +57,31 @@ ] -ChatModerationRequestInputsTypedDict = TypeAliasType( - "ChatModerationRequestInputsTypedDict", - Union[List[OneTypedDict], List[List[TwoTypedDict]]], +ChatModerationRequestInputs3TypedDict = TypeAliasType( + "ChatModerationRequestInputs3TypedDict", + Union[ + List[ChatModerationRequestInputs1TypedDict], + List[List[ChatModerationRequestInputs2TypedDict]], + ], ) r"""Chat to classify""" -ChatModerationRequestInputs = TypeAliasType( - "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] +ChatModerationRequestInputs3 = TypeAliasType( + "ChatModerationRequestInputs3", + Union[List[ChatModerationRequestInputs1], List[List[ChatModerationRequestInputs2]]], ) r"""Chat to classify""" class ChatModerationRequestTypedDict(TypedDict): - inputs: ChatModerationRequestInputsTypedDict + inputs: ChatModerationRequestInputs3TypedDict r"""Chat to classify""" model: str class ChatModerationRequest(BaseModel): - inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] + inputs: Annotated[ChatModerationRequestInputs3, pydantic.Field(alias="input")] r"""Chat to classify""" model: str diff --git a/src/mistralai/client/models/classifierdetailedjobout.py b/src/mistralai/client/models/classifierdetailedjobout.py index 1de4534f..ffe99270 100644 --- a/src/mistralai/client/models/classifierdetailedjobout.py +++ b/src/mistralai/client/models/classifierdetailedjobout.py @@ -16,36 +16,40 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", +from pydantic.functional_validators import AfterValidator +from 
typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierDetailedJobOutStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, ] ClassifierDetailedJobOutObject = Literal["job",] -ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict +ClassifierDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict -ClassifierDetailedJobOutIntegrations = WandbIntegrationOut - - -ClassifierDetailedJobOutJobType = Literal["classifier",] +ClassifierDetailedJobOutIntegration = WandbIntegrationOut class ClassifierDetailedJobOutTypedDict(TypedDict): @@ -64,11 +68,11 @@ class ClassifierDetailedJobOutTypedDict(TypedDict): fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[ - Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] + Nullable[List[ClassifierDetailedJobOutIntegrationTypedDict]] ] trained_tokens: NotRequired[Nullable[int]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[ClassifierDetailedJobOutJobType] + job_type: Literal["classifier"] events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" checkpoints: NotRequired[List[CheckpointOutTypedDict]] @@ -102,13 +106,16 @@ class ClassifierDetailedJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET - integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET + integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegration]] = UNSET trained_tokens: OptionalNullable[int] = UNSET metadata: OptionalNullable[JobMetadataOut] = UNSET - job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" + JOB_TYPE: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" @@ -125,7 +132,6 @@ def serialize_model(self, handler): "integrations", "trained_tokens", "metadata", - "job_type", "events", "checkpoints", ] diff --git a/src/mistralai/client/models/classifierftmodelout.py b/src/mistralai/client/models/classifierftmodelout.py index a4572108..c6d34167 100644 --- a/src/mistralai/client/models/classifierftmodelout.py +++ b/src/mistralai/client/models/classifierftmodelout.py @@ -13,17 +13,17 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict ClassifierFTModelOutObject = Literal["model",] -ClassifierFTModelOutModelType = Literal["classifier",] - - class ClassifierFTModelOutTypedDict(TypedDict): id: str created: int @@ -40,7 +40,7 @@ class ClassifierFTModelOutTypedDict(TypedDict): description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: NotRequired[List[str]] - model_type: NotRequired[ClassifierFTModelOutModelType] + model_type: Literal["classifier"] class ClassifierFTModelOut(BaseModel): @@ -74,7 +74,10 @@ class ClassifierFTModelOut(BaseModel): aliases: Optional[List[str]] = None - model_type: Optional[ClassifierFTModelOutModelType] = "classifier" + MODEL_TYPE: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="model_type"), + ] = "classifier" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -84,7 +87,6 @@ def serialize_model(self, handler): "description", "max_context_length", "aliases", - "model_type", ] nullable_fields = ["name", "description"] null_default_fields = [] diff --git a/src/mistralai/client/models/classifierjobout.py b/src/mistralai/client/models/classifierjobout.py index ab1e261d..1390aea1 100644 --- a/src/mistralai/client/models/classifierjobout.py +++ b/src/mistralai/client/models/classifierjobout.py @@ -13,23 +13,30 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierJobOutStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, ] r"""The current status of the fine-tuning job.""" @@ -38,14 +45,10 @@ r"""The object type of the fine-tuning job.""" -ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict +ClassifierJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict -ClassifierJobOutIntegrations = WandbIntegrationOut - - -ClassifierJobOutJobType = Literal["classifier",] -r"""The type of job (`FT` for fine-tuning).""" +ClassifierJobOutIntegration = WandbIntegrationOut class ClassifierJobOutTypedDict(TypedDict): 
@@ -71,12 +74,12 @@ class ClassifierJobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] + integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationTypedDict]]] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[ClassifierJobOutJobType] + job_type: Literal["classifier"] r"""The type of job (`FT` for fine-tuning).""" @@ -115,7 +118,7 @@ class ClassifierJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET + integrations: OptionalNullable[List[ClassifierJobOutIntegration]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET @@ -123,7 +126,10 @@ class ClassifierJobOut(BaseModel): metadata: OptionalNullable[JobMetadataOut] = UNSET - job_type: Optional[ClassifierJobOutJobType] = "classifier" + JOB_TYPE: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" r"""The type of job (`FT` for fine-tuning).""" @model_serializer(mode="wrap") @@ -136,7 +142,6 @@ def serialize_model(self, handler): "integrations", "trained_tokens", "metadata", - "job_type", ] nullable_fields = [ "validation_files", diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py index faf5b0b7..2f34cbda 100644 --- a/src/mistralai/client/models/codeinterpretertool.py +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -2,16 +2,22 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CodeInterpreterToolType = Literal["code_interpreter",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class CodeInterpreterToolTypedDict(TypedDict): - type: NotRequired[CodeInterpreterToolType] + type: Literal["code_interpreter"] class CodeInterpreterTool(BaseModel): - type: Optional[CodeInterpreterToolType] = "code_interpreter" + TYPE: Annotated[ + Annotated[ + Literal["code_interpreter"], + AfterValidator(validate_const("code_interpreter")), + ], + pydantic.Field(alias="type"), + ] = "code_interpreter" diff --git a/src/mistralai/client/models/completiondetailedjobout.py b/src/mistralai/client/models/completiondetailedjobout.py index 85c0c803..ea444b8b 100644 --- a/src/mistralai/client/models/completiondetailedjobout.py +++ b/src/mistralai/client/models/completiondetailedjobout.py @@ -16,42 +16,46 @@ 
OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CompletionDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionDetailedJobOutStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, ] CompletionDetailedJobOutObject = Literal["job",] -CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict +CompletionDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict -CompletionDetailedJobOutIntegrations = WandbIntegrationOut +CompletionDetailedJobOutIntegration = WandbIntegrationOut -CompletionDetailedJobOutJobType = Literal["completion",] +CompletionDetailedJobOutRepositoryTypedDict = GithubRepositoryOutTypedDict -CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict - - -CompletionDetailedJobOutRepositories = GithubRepositoryOut +CompletionDetailedJobOutRepository = GithubRepositoryOut class CompletionDetailedJobOutTypedDict(TypedDict): @@ -69,12 +73,12 @@ class CompletionDetailedJobOutTypedDict(TypedDict): fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[ - Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] + Nullable[List[CompletionDetailedJobOutIntegrationTypedDict]] ] trained_tokens: NotRequired[Nullable[int]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[CompletionDetailedJobOutJobType] - repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] + job_type: Literal["completion"] + repositories: NotRequired[List[CompletionDetailedJobOutRepositoryTypedDict]] events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" checkpoints: NotRequired[List[CheckpointOutTypedDict]] @@ -106,15 +110,18 @@ class CompletionDetailedJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET - integrations: OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET + integrations: OptionalNullable[List[CompletionDetailedJobOutIntegration]] = UNSET trained_tokens: OptionalNullable[int] = UNSET metadata: OptionalNullable[JobMetadataOut] = UNSET - job_type: Optional[CompletionDetailedJobOutJobType] = "completion" + JOB_TYPE: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" - repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None + repositories: Optional[List[CompletionDetailedJobOutRepository]] = None events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" @@ -131,7 +138,6 @@ def serialize_model(self, handler): "integrations", "trained_tokens", "metadata", - "job_type", "repositories", "events", "checkpoints", diff --git a/src/mistralai/client/models/completionftmodelout.py b/src/mistralai/client/models/completionftmodelout.py index ccecbb6a..92f530af 100644 --- a/src/mistralai/client/models/completionftmodelout.py +++ b/src/mistralai/client/models/completionftmodelout.py @@ -12,17 +12,17 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict CompletionFTModelOutObject = Literal["model",] -ModelType = Literal["completion",] - - class CompletionFTModelOutTypedDict(TypedDict): id: str created: int @@ -38,7 +38,7 @@ class CompletionFTModelOutTypedDict(TypedDict): description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: NotRequired[List[str]] - model_type: NotRequired[ModelType] + model_type: Literal["completion"] class CompletionFTModelOut(BaseModel): @@ -70,7 +70,10 @@ class CompletionFTModelOut(BaseModel): aliases: Optional[List[str]] = None - model_type: Optional[ModelType] = "completion" + MODEL_TYPE: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="model_type"), + ] = "completion" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -80,7 +83,6 @@ def serialize_model(self, handler): "description", "max_context_length", "aliases", - "model_type", ] nullable_fields = ["name", "description"] null_default_fields = [] diff --git a/src/mistralai/client/models/completionjobout.py b/src/mistralai/client/models/completionjobout.py index ecd95bb9..1628d8bb 100644 --- a/src/mistralai/client/models/completionjobout.py +++ b/src/mistralai/client/models/completionjobout.py @@ -14,23 +14,30 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Status = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionJobOutStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, ] r"""The current status of the fine-tuning job.""" @@ -39,20 +46,16 @@ r"""The object type of the fine-tuning job.""" -IntegrationsTypedDict = WandbIntegrationOutTypedDict +CompletionJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict -Integrations = WandbIntegrationOut +CompletionJobOutIntegration = WandbIntegrationOut -JobType = Literal["completion",] -r"""The type of job (`FT` for fine-tuning).""" +CompletionJobOutRepositoryTypedDict = GithubRepositoryOutTypedDict -RepositoriesTypedDict = GithubRepositoryOutTypedDict - - -Repositories = 
GithubRepositoryOut +CompletionJobOutRepository = GithubRepositoryOut class CompletionJobOutTypedDict(TypedDict): @@ -61,7 +64,7 @@ class CompletionJobOutTypedDict(TypedDict): auto_start: bool model: str r"""The name of the model to fine-tune.""" - status: Status + status: CompletionJobOutStatus r"""The current status of the fine-tuning job.""" created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" @@ -78,14 +81,14 @@ class CompletionJobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[IntegrationsTypedDict]]] + integrations: NotRequired[Nullable[List[CompletionJobOutIntegrationTypedDict]]] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[JobType] + job_type: Literal["completion"] r"""The type of job (`FT` for fine-tuning).""" - repositories: NotRequired[List[RepositoriesTypedDict]] + repositories: NotRequired[List[CompletionJobOutRepositoryTypedDict]] class CompletionJobOut(BaseModel): @@ -97,7 +100,7 @@ class CompletionJobOut(BaseModel): model: str r"""The name of the model to fine-tune.""" - status: Status + status: CompletionJobOutStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -123,7 +126,7 @@ class CompletionJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[Integrations]] = UNSET + integrations: OptionalNullable[List[CompletionJobOutIntegration]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET @@ -131,10 +134,13 @@ class CompletionJobOut(BaseModel): metadata: OptionalNullable[JobMetadataOut] = UNSET - job_type: Optional[JobType] = "completion" + JOB_TYPE: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" r"""The type of job (`FT` for fine-tuning).""" - repositories: Optional[List[Repositories]] = None + repositories: Optional[List[CompletionJobOutRepository]] = None @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -146,7 +152,6 @@ def serialize_model(self, handler): "integrations", "trained_tokens", "metadata", - "job_type", "repositories", ] nullable_fields = [ diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py index 308588a1..1c2b4592 100644 --- a/src/mistralai/client/models/conversationevents.py +++ b/src/mistralai/client/models/conversationevents.py @@ -25,8 +25,7 @@ ToolExecutionStartedEventTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -50,18 +49,18 @@ ConversationEventsData = Annotated[ Union[ - Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], - Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], - Annotated[ResponseDoneEvent, Tag("conversation.response.done")], - Annotated[ResponseErrorEvent, Tag("conversation.response.error")], - Annotated[ResponseStartedEvent, Tag("conversation.response.started")], - Annotated[FunctionCallEvent, Tag("function.call.delta")], - Annotated[MessageOutputEvent, Tag("message.output.delta")], - Annotated[ToolExecutionDeltaEvent, Tag("tool.execution.delta")], - Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], - Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], + AgentHandoffDoneEvent, + AgentHandoffStartedEvent, + ResponseDoneEvent, + ResponseErrorEvent, + ResponseStartedEvent, + FunctionCallEvent, + MessageOutputEvent, + ToolExecutionDeltaEvent, + ToolExecutionDoneEvent, + ToolExecutionStartedEvent, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py index 40bd1e72..83e860f2 100644 --- a/src/mistralai/client/models/conversationhistory.py +++ b/src/mistralai/client/models/conversationhistory.py @@ -15,8 +15,8 @@ ConversationHistoryObject = Literal["conversation.history",] -EntriesTypedDict = TypeAliasType( - "EntriesTypedDict", +EntryTypedDict = TypeAliasType( + "EntryTypedDict", Union[ FunctionResultEntryTypedDict, MessageInputEntryTypedDict, @@ -28,8 +28,8 @@ ) -Entries = TypeAliasType( - "Entries", +Entry = TypeAliasType( + "Entry", Union[ FunctionResultEntry, MessageInputEntry, @@ -45,7 +45,7 @@ class ConversationHistoryTypedDict(TypedDict): r"""Retrieve all entries in a conversation.""" conversation_id: str - entries: List[EntriesTypedDict] + entries: List[EntryTypedDict] 
object: NotRequired[ConversationHistoryObject] @@ -54,6 +54,6 @@ class ConversationHistory(BaseModel): conversation_id: str - entries: List[Entries] + entries: List[Entry] object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py index e3211c4c..dd66c6ce 100644 --- a/src/mistralai/client/models/conversationrequest.py +++ b/src/mistralai/client/models/conversationrequest.py @@ -16,20 +16,19 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -HandoffExecution = Literal[ +ConversationRequestHandoffExecution = Literal[ "client", "server", ] -ToolsTypedDict = TypeAliasType( - "ToolsTypedDict", +ConversationRequestToolTypedDict = TypeAliasType( + "ConversationRequestToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -41,39 +40,43 @@ ) -Tools = Annotated[ +ConversationRequestTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] -AgentVersionTypedDict = TypeAliasType("AgentVersionTypedDict", Union[str, int]) +ConversationRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRequestAgentVersionTypedDict", Union[str, int] +) -AgentVersion = TypeAliasType("AgentVersion", Union[str, int]) +ConversationRequestAgentVersion = TypeAliasType( + "ConversationRequestAgentVersion", Union[str, int] +) class ConversationRequestTypedDict(TypedDict): inputs: ConversationInputsTypedDict stream: NotRequired[bool] store: NotRequired[Nullable[bool]] - handoff_execution: NotRequired[Nullable[HandoffExecution]] + handoff_execution: NotRequired[Nullable[ConversationRequestHandoffExecution]] instructions: NotRequired[Nullable[str]] - tools: NotRequired[List[ToolsTypedDict]] + tools: NotRequired[List[ConversationRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[AgentVersionTypedDict]] + agent_version: NotRequired[Nullable[ConversationRequestAgentVersionTypedDict]] model: NotRequired[Nullable[str]] @@ -84,11 +87,11 @@ class ConversationRequest(BaseModel): store: OptionalNullable[bool] = UNSET - handoff_execution: OptionalNullable[HandoffExecution] = UNSET + handoff_execution: OptionalNullable[ConversationRequestHandoffExecution] = UNSET instructions: OptionalNullable[str] = UNSET - tools: Optional[List[Tools]] = None + tools: Optional[List[ConversationRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: 
OptionalNullable[CompletionArgs] = UNSET @@ -101,7 +104,7 @@ class ConversationRequest(BaseModel): agent_id: OptionalNullable[str] = UNSET - agent_version: OptionalNullable[AgentVersion] = UNSET + agent_version: OptionalNullable[ConversationRequestAgentVersion] = UNSET model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py index 32d0f28f..0a11fff8 100644 --- a/src/mistralai/client/models/conversationresponse.py +++ b/src/mistralai/client/models/conversationresponse.py @@ -14,8 +14,8 @@ ConversationResponseObject = Literal["conversation.response",] -OutputsTypedDict = TypeAliasType( - "OutputsTypedDict", +OutputTypedDict = TypeAliasType( + "OutputTypedDict", Union[ ToolExecutionEntryTypedDict, FunctionCallEntryTypedDict, @@ -25,8 +25,8 @@ ) -Outputs = TypeAliasType( - "Outputs", +Output = TypeAliasType( + "Output", Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], ) @@ -35,7 +35,7 @@ class ConversationResponseTypedDict(TypedDict): r"""The response after appending new entries to the conversation.""" conversation_id: str - outputs: List[OutputsTypedDict] + outputs: List[OutputTypedDict] usage: ConversationUsageInfoTypedDict object: NotRequired[ConversationResponseObject] @@ -45,7 +45,7 @@ class ConversationResponse(BaseModel): conversation_id: str - outputs: List[Outputs] + outputs: List[Output] usage: ConversationUsageInfo diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py index 219230a2..9b8d0c44 100644 --- a/src/mistralai/client/models/conversationstreamrequest.py +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -16,8 +16,7 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -28,8 +27,8 @@ ] -ConversationStreamRequestToolsTypedDict = TypeAliasType( - "ConversationStreamRequestToolsTypedDict", +ConversationStreamRequestToolTypedDict = TypeAliasType( + "ConversationStreamRequestToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -41,16 +40,16 @@ ) -ConversationStreamRequestTools = Annotated[ +ConversationStreamRequestTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] @@ -70,7 +69,7 @@ class ConversationStreamRequestTypedDict(TypedDict): store: NotRequired[Nullable[bool]] handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] instructions: NotRequired[Nullable[str]] - tools: NotRequired[List[ConversationStreamRequestToolsTypedDict]] + tools: NotRequired[List[ConversationStreamRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: 
NotRequired[Nullable[CompletionArgsTypedDict]] name: NotRequired[Nullable[str]] @@ -94,7 +93,7 @@ class ConversationStreamRequest(BaseModel): instructions: OptionalNullable[str] = UNSET - tools: Optional[List[ConversationStreamRequestTools]] = None + tools: Optional[List[ConversationStreamRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: OptionalNullable[CompletionArgs] = UNSET diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py index 0ae56da8..fc08d62a 100644 --- a/src/mistralai/client/models/deltamessage.py +++ b/src/mistralai/client/models/deltamessage.py @@ -15,24 +15,26 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] ) -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) class DeltaMessageTypedDict(TypedDict): role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: OptionalNullable[str] = UNSET - content: OptionalNullable[Content] = UNSET + content: OptionalNullable[DeltaMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py index 861a58d3..21eab39e 100644 --- a/src/mistralai/client/models/documentlibrarytool.py +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -2,21 +2,27 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentLibraryToolType = Literal["document_library",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal +from typing_extensions import Annotated, TypedDict class DocumentLibraryToolTypedDict(TypedDict): library_ids: List[str] r"""Ids of the library in which to search.""" - type: NotRequired[DocumentLibraryToolType] + type: Literal["document_library"] class DocumentLibraryTool(BaseModel): library_ids: List[str] r"""Ids of the library in which to search.""" - type: Optional[DocumentLibraryToolType] = "document_library" + TYPE: Annotated[ + Annotated[ + Literal["document_library"], + AfterValidator(validate_const("document_library")), + ], + pydantic.Field(alias="type"), + ] = "document_library" diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py index 723c6cc2..ab2f1524 100644 --- a/src/mistralai/client/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py @@ -9,7 +9,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): +class MultiPartBodyParamsTypedDict(TypedDict): file: FileTypedDict r"""The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: @@ -24,7 +24,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): purpose: NotRequired[FilePurpose] -class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): +class MultiPartBodyParams(BaseModel): file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] r"""The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: diff --git a/src/mistralai/client/models/ftclassifierlossfunction.py b/src/mistralai/client/models/ftclassifierlossfunction.py index c4ef66e0..e6781a5e 100644 --- a/src/mistralai/client/models/ftclassifierlossfunction.py +++ b/src/mistralai/client/models/ftclassifierlossfunction.py @@ -1,10 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -FTClassifierLossFunction = Literal[ - "single_class", - "multi_class", +FTClassifierLossFunction = Union[ + Literal[ + "single_class", + "multi_class", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py index 36cb723d..06f088ec 100644 --- a/src/mistralai/client/models/ftmodelcard.py +++ b/src/mistralai/client/models/ftmodelcard.py @@ -18,9 +18,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -FTModelCardType = Literal["fine-tuned",] - - class FTModelCardTypedDict(TypedDict): r"""Extra fields for fine-tuned models.""" @@ -38,7 +35,7 @@ class FTModelCardTypedDict(TypedDict): deprecation: NotRequired[Nullable[datetime]] deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: FTModelCardType + type: Literal["fine-tuned"] archived: NotRequired[bool] @@ -74,9 +71,7 @@ class FTModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[ - Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) - ], + Annotated[Literal["fine-tuned"], AfterValidator(validate_const("fine-tuned"))], pydantic.Field(alias="type"), ] = "fine-tuned" @@ -95,7 +90,6 @@ def serialize_model(self, handler): "deprecation", "deprecation_replacement_model", "default_model_temperature", - "type", "archived", ] nullable_fields = [ diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py index 4e040585..8146fa5c 100644 --- a/src/mistralai/client/models/functioncallevent.py +++ b/src/mistralai/client/models/functioncallevent.py @@ -3,11 +3,11 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionCallEventType = Literal["function.call.delta",] +from typing_extensions import Annotated, NotRequired, TypedDict class FunctionCallEventTypedDict(TypedDict): @@ -15,7 +15,7 @@ class FunctionCallEventTypedDict(TypedDict): name: str tool_call_id: str arguments: str - type: NotRequired[FunctionCallEventType] + type: Literal["function.call.delta"] created_at: NotRequired[datetime] output_index: 
NotRequired[int] @@ -29,7 +29,13 @@ class FunctionCallEvent(BaseModel): arguments: str - type: Optional[FunctionCallEventType] = "function.call.delta" + TYPE: Annotated[ + Annotated[ + Literal["function.call.delta"], + AfterValidator(validate_const("function.call.delta")), + ], + pydantic.Field(alias="type"), + ] = "function.call.delta" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py index 74b50d1b..16abcbf3 100644 --- a/src/mistralai/client/models/functiontool.py +++ b/src/mistralai/client/models/functiontool.py @@ -3,19 +3,22 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionToolType = Literal["function",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class FunctionToolTypedDict(TypedDict): function: FunctionTypedDict - type: NotRequired[FunctionToolType] + type: Literal["function"] class FunctionTool(BaseModel): function: Function - type: Optional[FunctionToolType] = "function" + TYPE: Annotated[ + Annotated[Literal["function"], AfterValidator(validate_const("function"))], + pydantic.Field(alias="type"), + ] = "function" diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py index e56fef9b..4e4b4777 100644 --- a/src/mistralai/client/models/githubrepositoryin.py +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -8,19 +8,19 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryInType = Literal["github",] +from typing_extensions import Annotated, NotRequired, TypedDict class GithubRepositoryInTypedDict(TypedDict): name: str owner: str token: str - type: NotRequired[GithubRepositoryInType] + type: Literal["github"] ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -32,7 +32,10 @@ class GithubRepositoryIn(BaseModel): token: str - type: Optional[GithubRepositoryInType] = "github" + TYPE: Annotated[ + Annotated[Literal["github"], AfterValidator(validate_const("github"))], + pydantic.Field(alias="type"), + ] = "github" ref: OptionalNullable[str] = UNSET @@ -40,7 +43,7 @@ class GithubRepositoryIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] + optional_fields = ["ref", "weight"] nullable_fields = ["ref"] null_default_fields = [] diff --git a/src/mistralai/client/models/githubrepositoryout.py b/src/mistralai/client/models/githubrepositoryout.py index e3aa9ebc..1f738708 100644 --- a/src/mistralai/client/models/githubrepositoryout.py +++ b/src/mistralai/client/models/githubrepositoryout.py @@ -8,19 +8,19 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryOutType = Literal["github",] +from typing_extensions import 
Annotated, NotRequired, TypedDict class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str commit_id: str - type: NotRequired[GithubRepositoryOutType] + type: Literal["github"] ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -32,7 +32,10 @@ class GithubRepositoryOut(BaseModel): commit_id: str - type: Optional[GithubRepositoryOutType] = "github" + TYPE: Annotated[ + Annotated[Literal["github"], AfterValidator(validate_const("github"))], + pydantic.Field(alias="type"), + ] = "github" ref: OptionalNullable[str] = UNSET @@ -40,7 +43,7 @@ class GithubRepositoryOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] + optional_fields = ["ref", "weight"] nullable_fields = ["ref"] null_default_fields = [] diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py index e09dba81..c5dbda3f 100644 --- a/src/mistralai/client/models/imagegenerationtool.py +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -2,16 +2,22 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ImageGenerationToolType = Literal["image_generation",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class ImageGenerationToolTypedDict(TypedDict): - type: NotRequired[ImageGenerationToolType] + type: Literal["image_generation"] class ImageGenerationTool(BaseModel): - type: Optional[ImageGenerationToolType] = "image_generation" + TYPE: Annotated[ + Annotated[ + Literal["image_generation"], + AfterValidator(validate_const("image_generation")), + ], + pydantic.Field(alias="type"), + ] = "image_generation" diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py index f967a3c8..9968ed74 100644 --- a/src/mistralai/client/models/imageurlchunk.py +++ b/src/mistralai/client/models/imageurlchunk.py @@ -7,12 +7,12 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] ) -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) ImageURLChunkType = Literal["image_url",] @@ -21,13 +21,13 @@ class ImageURLChunkTypedDict(TypedDict): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - image_url: ImageURLChunkImageURLTypedDict + image_url: ImageURLUnionTypedDict type: NotRequired[ImageURLChunkType] class ImageURLChunk(BaseModel): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - image_url: ImageURLChunkImageURL + image_url: ImageURLUnion type: Optional[ImageURLChunkType] = "image_url" diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py index fb067476..2b8b2f5f 100644 --- a/src/mistralai/client/models/inputs.py +++ b/src/mistralai/client/models/inputs.py @@ -13,8 +13,8 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -InstructRequestInputsMessagesTypedDict = TypeAliasType( - 
"InstructRequestInputsMessagesTypedDict", +InputsMessageTypedDict = TypeAliasType( + "InputsMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -24,7 +24,7 @@ ) -InstructRequestInputsMessages = Annotated[ +InputsMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -36,11 +36,11 @@ class InstructRequestInputsTypedDict(TypedDict): - messages: List[InstructRequestInputsMessagesTypedDict] + messages: List[InputsMessageTypedDict] class InstructRequestInputs(BaseModel): - messages: List[InstructRequestInputsMessages] + messages: List[InputsMessage] InputsTypedDict = TypeAliasType( diff --git a/src/mistralai/client/models/instructrequest.py b/src/mistralai/client/models/instructrequest.py index 1b2f2693..73d482d8 100644 --- a/src/mistralai/client/models/instructrequest.py +++ b/src/mistralai/client/models/instructrequest.py @@ -12,8 +12,8 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -InstructRequestMessagesTypedDict = TypeAliasType( - "InstructRequestMessagesTypedDict", +InstructRequestMessageTypedDict = TypeAliasType( + "InstructRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -23,7 +23,7 @@ ) -InstructRequestMessages = Annotated[ +InstructRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -35,8 +35,8 @@ class InstructRequestTypedDict(TypedDict): - messages: List[InstructRequestMessagesTypedDict] + messages: List[InstructRequestMessageTypedDict] class InstructRequest(BaseModel): - messages: List[InstructRequestMessages] + messages: List[InstructRequestMessage] diff --git a/src/mistralai/client/models/jobin.py b/src/mistralai/client/models/jobin.py index dc7684fc..23a431c9 100644 --- a/src/mistralai/client/models/jobin.py +++ b/src/mistralai/client/models/jobin.py @@ -26,10 +26,10 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -JobInIntegrationsTypedDict = WandbIntegrationTypedDict +JobInIntegrationTypedDict = WandbIntegrationTypedDict -JobInIntegrations = WandbIntegration +JobInIntegration = WandbIntegration HyperparametersTypedDict = TypeAliasType( @@ -46,10 +46,10 @@ ) -JobInRepositoriesTypedDict = GithubRepositoryInTypedDict +JobInRepositoryTypedDict = GithubRepositoryInTypedDict -JobInRepositories = GithubRepositoryIn +JobInRepository = GithubRepositoryIn class JobInTypedDict(TypedDict): @@ -61,13 +61,13 @@ class JobInTypedDict(TypedDict): r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" suffix: NotRequired[Nullable[str]] r"""A string that will be added to your fine-tuning model name. 
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] + integrations: NotRequired[Nullable[List[JobInIntegrationTypedDict]]] r"""A list of integrations to enable for your fine-tuning job.""" auto_start: NotRequired[bool] r"""This field will be required in a future release.""" invalid_sample_skip_percentage: NotRequired[float] job_type: NotRequired[Nullable[FineTuneableModelType]] - repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] + repositories: NotRequired[Nullable[List[JobInRepositoryTypedDict]]] classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] @@ -85,7 +85,7 @@ class JobIn(BaseModel): suffix: OptionalNullable[str] = UNSET r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: OptionalNullable[List[JobInIntegrations]] = UNSET + integrations: OptionalNullable[List[JobInIntegration]] = UNSET r"""A list of integrations to enable for your fine-tuning job.""" auto_start: Optional[bool] = None @@ -95,7 +95,7 @@ class JobIn(BaseModel): job_type: OptionalNullable[FineTuneableModelType] = UNSET - repositories: OptionalNullable[List[JobInRepositories]] = UNSET + repositories: OptionalNullable[List[JobInRepository]] = UNSET classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py index b36d3c3e..5d9c026b 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -10,8 +10,8 @@ CompletionDetailedJobOutTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -36,10 +36,7 @@ class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), + Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], + Field(discriminator="JOB_TYPE"), ] r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py index ece0d15a..c54aaa5e 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -4,35 +4,30 @@ from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, 
Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType -Response1TypedDict = TypeAliasType( - "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +ResponseTypedDict = TypeAliasType( + "ResponseTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] ) -Response1 = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +Response = Annotated[ + Union[ClassifierJobOut, CompletionJobOut], Field(discriminator="JOB_TYPE") ] JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], + Union[LegacyJobMetadataOutTypedDict, ResponseTypedDict], ) r"""OK""" JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, Response1], + Union[LegacyJobMetadataOut, Response], ) r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py index aa5a2609..8837d262 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -10,8 +10,8 @@ CompletionDetailedJobOutTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -36,10 +36,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), + Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], + Field(discriminator="JOB_TYPE"), ] r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index 7e399b31..8c19bacb 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -15,7 +15,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -QueryParamStatus = Literal[ +JobsAPIRoutesFineTuningGetFineTuningJobsStatus = Literal[ "QUEUED", "STARTED", "VALIDATING", @@ -42,7 +42,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): created_before: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: NotRequired[Nullable[QueryParamStatus]] + status: NotRequired[Nullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus]] r"""The current job state to filter on. 
When set, the other results are not displayed.""" wandb_project: NotRequired[Nullable[str]] r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" @@ -89,7 +89,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" status: Annotated[ - OptionalNullable[QueryParamStatus], + OptionalNullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET r"""The current job state to filter on. When set, the other results are not displayed.""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py index ed5938b0..91d581eb 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -10,8 +10,8 @@ CompletionDetailedJobOutTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -34,10 +34,7 @@ class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), + Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], + Field(discriminator="JOB_TYPE"), ] r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py index a2b70b37..760c22f4 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -5,13 +5,8 @@ from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict from mistralai.client.types import BaseModel -from mistralai.client.utils import ( - FieldMetadata, - PathParamMetadata, - RequestMetadata, - get_discriminator, -) -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -42,10 +37,6 @@ class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ - Union[ - Annotated[ClassifierFTModelOut, Tag("classifier")], - Annotated[CompletionFTModelOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), + Union[ClassifierFTModelOut, CompletionFTModelOut], Field(discriminator="MODEL_TYPE") ] r"""OK""" diff --git a/src/mistralai/client/models/jobsout.py b/src/mistralai/client/models/jobsout.py index 9087704f..7727d56c 100644 --- a/src/mistralai/client/models/jobsout.py +++ 
b/src/mistralai/client/models/jobsout.py @@ -4,8 +4,7 @@ from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -16,11 +15,7 @@ JobsOutData = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), + Union[ClassifierJobOut, CompletionJobOut], Field(discriminator="JOB_TYPE") ] diff --git a/src/mistralai/client/models/libraries_documents_upload_v1op.py b/src/mistralai/client/models/libraries_documents_upload_v1op.py index e2d59d9f..18a5b780 100644 --- a/src/mistralai/client/models/libraries_documents_upload_v1op.py +++ b/src/mistralai/client/models/libraries_documents_upload_v1op.py @@ -12,7 +12,7 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): +class DocumentUploadTypedDict(TypedDict): file: FileTypedDict r"""The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: @@ -26,7 +26,7 @@ class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): """ -class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): +class DocumentUpload(BaseModel): file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] r"""The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: @@ -42,7 +42,7 @@ class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): library_id: str - request_body: LibrariesDocumentsUploadV1DocumentUploadTypedDict + request_body: DocumentUploadTypedDict class LibrariesDocumentsUploadV1Request(BaseModel): @@ -51,6 +51,6 @@ class LibrariesDocumentsUploadV1Request(BaseModel): ] request_body: Annotated[ - LibrariesDocumentsUploadV1DocumentUpload, + DocumentUpload, FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), ] diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py index 12a31097..a72319cf 100644 --- a/src/mistralai/client/models/messageinputentry.py +++ b/src/mistralai/client/models/messageinputentry.py @@ -12,21 +12,25 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) from pydantic import model_serializer from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict -Object = Literal["entry",] +MessageInputEntryObject = Literal["entry",] MessageInputEntryType = Literal["message.input",] -MessageInputEntryRole = Literal[ - "assistant", - "user", +MessageInputEntryRole = Union[ + Literal[ + "assistant", + "user", + ], + UnrecognizedStr, ] @@ -46,7 +50,7 @@ class MessageInputEntryTypedDict(TypedDict): role: MessageInputEntryRole content: MessageInputEntryContentTypedDict - object: NotRequired[Object] + object: NotRequired[MessageInputEntryObject] type: NotRequired[MessageInputEntryType] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] @@ -61,7 +65,7 @@ class MessageInputEntry(BaseModel): content: MessageInputEntryContent - object: Optional[Object] = "entry" + object: Optional[MessageInputEntryObject] = "entry" type: Optional[MessageInputEntryType] = "message.input" diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py index 3db7f5a0..447e3867 100644 --- a/src/mistralai/client/models/messageoutputevent.py +++ b/src/mistralai/client/models/messageoutputevent.py @@ -10,12 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEventType = Literal["message.output.delta",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict MessageOutputEventRole = Literal["assistant",] @@ -34,7 +34,7 @@ class MessageOutputEventTypedDict(TypedDict): id: str content: MessageOutputEventContentTypedDict - type: NotRequired[MessageOutputEventType] + type: Literal["message.output.delta"] created_at: NotRequired[datetime] output_index: NotRequired[int] content_index: NotRequired[int] @@ -48,7 +48,13 @@ class MessageOutputEvent(BaseModel): content: MessageOutputEventContent - type: Optional[MessageOutputEventType] = "message.output.delta" + TYPE: Annotated[ + Annotated[ + Literal["message.output.delta"], + AfterValidator(validate_const("message.output.delta")), + ], + pydantic.Field(alias="type"), + ] = "message.output.delta" created_at: Optional[datetime] = None @@ -65,7 +71,6 @@ class MessageOutputEvent(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): 
optional_fields = [ - "type", "created_at", "output_index", "content_index", diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py index 574f053d..d348072a 100644 --- a/src/mistralai/client/models/modelconversation.py +++ b/src/mistralai/client/models/modelconversation.py @@ -16,14 +16,13 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ModelConversationToolsTypedDict = TypeAliasType( - "ModelConversationToolsTypedDict", +ModelConversationToolTypedDict = TypeAliasType( + "ModelConversationToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -35,16 +34,16 @@ ) -ModelConversationTools = Annotated[ +ModelConversationTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] @@ -58,7 +57,7 @@ class ModelConversationTypedDict(TypedDict): model: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[ModelConversationToolsTypedDict]] + tools: NotRequired[List[ModelConversationToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -83,7 +82,7 @@ class ModelConversation(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[ModelConversationTools]] = None + tools: Optional[List[ModelConversationTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py index 6a5209fa..b357ae84 100644 --- a/src/mistralai/client/models/modellist.py +++ b/src/mistralai/client/models/modellist.py @@ -4,31 +4,27 @@ from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .ftmodelcard import FTModelCard, FTModelCardTypedDict from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -DataTypedDict = TypeAliasType( - "DataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] +ModelListDataTypedDict = TypeAliasType( + "ModelListDataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] ) -Data = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), +ModelListData = 
Annotated[ Union[BaseModelCard, FTModelCard], Field(discriminator="TYPE") ] class ModelListTypedDict(TypedDict): object: NotRequired[str] - data: NotRequired[List[DataTypedDict]] + data: NotRequired[List[ModelListDataTypedDict]] class ModelList(BaseModel): object: Optional[str] = "list" - data: Optional[List[Data]] = None + data: Optional[List[ModelListData]] = None diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py index 0c9091de..f3b0bc45 100644 --- a/src/mistralai/client/models/ocrtableobject.py +++ b/src/mistralai/client/models/ocrtableobject.py @@ -1,15 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UnrecognizedStr import pydantic -from typing import Literal +from typing import Literal, Union from typing_extensions import Annotated, TypedDict -Format = Literal[ - "markdown", - "html", +Format = Union[ + Literal[ + "markdown", + "html", + ], + UnrecognizedStr, ] r"""Format of the table""" diff --git a/src/mistralai/client/models/realtimetranscriptionerrordetail.py b/src/mistralai/client/models/realtimetranscriptionerrordetail.py index 27bb8d87..e1f48379 100644 --- a/src/mistralai/client/models/realtimetranscriptionerrordetail.py +++ b/src/mistralai/client/models/realtimetranscriptionerrordetail.py @@ -6,23 +6,27 @@ from typing_extensions import TypeAliasType, TypedDict -MessageTypedDict = TypeAliasType("MessageTypedDict", Union[str, Dict[str, Any]]) +RealtimeTranscriptionErrorDetailMessageTypedDict = TypeAliasType( + "RealtimeTranscriptionErrorDetailMessageTypedDict", Union[str, Dict[str, Any]] +) r"""Human-readable error message.""" -Message = TypeAliasType("Message", Union[str, Dict[str, Any]]) +RealtimeTranscriptionErrorDetailMessage = TypeAliasType( + "RealtimeTranscriptionErrorDetailMessage", Union[str, Dict[str, Any]] +) r"""Human-readable error message.""" class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): - message: MessageTypedDict + message: RealtimeTranscriptionErrorDetailMessageTypedDict r"""Human-readable error message.""" code: int r"""Internal error code for debugging.""" class RealtimeTranscriptionErrorDetail(BaseModel): - message: Message + message: RealtimeTranscriptionErrorDetailMessage r"""Human-readable error message.""" code: int diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py index 54056256..283baa11 100644 --- a/src/mistralai/client/models/responsedoneevent.py +++ b/src/mistralai/client/models/responsedoneevent.py @@ -4,22 +4,28 @@ from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseDoneEventType = Literal["conversation.response.done",] +from typing_extensions import Annotated, NotRequired, TypedDict class ResponseDoneEventTypedDict(TypedDict): usage: ConversationUsageInfoTypedDict - type: NotRequired[ResponseDoneEventType] + type: Literal["conversation.response.done"] created_at: NotRequired[datetime] class ResponseDoneEvent(BaseModel): usage: ConversationUsageInfo - type: 
Optional[ResponseDoneEventType] = "conversation.response.done" + TYPE: Annotated[ + Annotated[ + Literal["conversation.response.done"], + AfterValidator(validate_const("conversation.response.done")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.done" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py index c9ef95a0..ee078963 100644 --- a/src/mistralai/client/models/responseerrorevent.py +++ b/src/mistralai/client/models/responseerrorevent.py @@ -3,17 +3,17 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseErrorEventType = Literal["conversation.response.error",] +from typing_extensions import Annotated, NotRequired, TypedDict class ResponseErrorEventTypedDict(TypedDict): message: str code: int - type: NotRequired[ResponseErrorEventType] + type: Literal["conversation.response.error"] created_at: NotRequired[datetime] @@ -22,6 +22,12 @@ class ResponseErrorEvent(BaseModel): code: int - type: Optional[ResponseErrorEventType] = "conversation.response.error" + TYPE: Annotated[ + Annotated[ + Literal["conversation.response.error"], + AfterValidator(validate_const("conversation.response.error")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.error" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/responseformats.py b/src/mistralai/client/models/responseformats.py index cbf83ce7..b98cd098 100644 --- a/src/mistralai/client/models/responseformats.py +++ b/src/mistralai/client/models/responseformats.py @@ -1,11 +1,15 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -ResponseFormats = Literal[ - "text", - "json_object", - "json_schema", +ResponseFormats = Union[ + Literal[ + "text", + "json_object", + "json_schema", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py index dc6a10f9..0841fd58 100644 --- a/src/mistralai/client/models/responsestartedevent.py +++ b/src/mistralai/client/models/responsestartedevent.py @@ -3,22 +3,28 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseStartedEventType = Literal["conversation.response.started",] +from typing_extensions import Annotated, NotRequired, TypedDict class ResponseStartedEventTypedDict(TypedDict): conversation_id: str - type: NotRequired[ResponseStartedEventType] + type: Literal["conversation.response.started"] created_at: NotRequired[datetime] class ResponseStartedEvent(BaseModel): conversation_id: str - type: Optional[ResponseStartedEventType] = "conversation.response.started" + TYPE: Annotated[ + Annotated[ + Literal["conversation.response.started"], + AfterValidator(validate_const("conversation.response.started")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.started" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py index 7fdcd37d..96e5b57f 100644 --- a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py @@ -4,8 +4,8 @@ from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .ftmodelcard import FTModelCard, FTModelCardTypedDict from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -22,17 +22,14 @@ class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): r"""The ID of the model to retrieve.""" -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", +ResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict], ) r"""Successful Response""" -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), +ResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[BaseModelCard, FTModelCard], Field(discriminator="TYPE") ] r"""Successful Response""" diff --git a/src/mistralai/client/models/ssetypes.py b/src/mistralai/client/models/ssetypes.py index 796f0327..ac2722f1 
100644 --- a/src/mistralai/client/models/ssetypes.py +++ b/src/mistralai/client/models/ssetypes.py @@ -1,19 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -SSETypes = Literal[ - "conversation.response.started", - "conversation.response.done", - "conversation.response.error", - "message.output.delta", - "tool.execution.started", - "tool.execution.delta", - "tool.execution.done", - "agent.handoff.started", - "agent.handoff.done", - "function.call.delta", +SSETypes = Union[ + Literal[ + "conversation.response.started", + "conversation.response.done", + "conversation.response.error", + "message.output.delta", + "tool.execution.started", + "tool.execution.delta", + "tool.execution.done", + "agent.handoff.started", + "agent.handoff.done", + "function.call.delta", + ], + UnrecognizedStr, ] r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py index 9e01bc57..245e7b61 100644 --- a/src/mistralai/client/models/systemmessage.py +++ b/src/mistralai/client/models/systemmessage.py @@ -6,8 +6,11 @@ SystemMessageContentChunksTypedDict, ) from mistralai.client.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( @@ -21,15 +24,15 @@ ) -Role = Literal["system",] - - class SystemMessageTypedDict(TypedDict): content: SystemMessageContentTypedDict - role: NotRequired[Role] + role: Literal["system"] class SystemMessage(BaseModel): content: SystemMessageContent - role: Optional[Role] = "system" + ROLE: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" diff --git a/src/mistralai/client/models/toolchoiceenum.py b/src/mistralai/client/models/toolchoiceenum.py index 01f6f677..ba8195b8 100644 --- a/src/mistralai/client/models/toolchoiceenum.py +++ b/src/mistralai/client/models/toolchoiceenum.py @@ -1,12 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -ToolChoiceEnum = Literal[ - "auto", - "none", - "any", - "required", +ToolChoiceEnum = Union[ + Literal[ + "auto", + "none", + "any", + "required", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py index 0268e6a0..aeda1472 100644 --- a/src/mistralai/client/models/toolexecutiondeltaevent.py +++ b/src/mistralai/client/models/toolexecutiondeltaevent.py @@ -4,11 +4,11 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionDeltaEventType = Literal["tool.execution.delta",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolExecutionDeltaEventNameTypedDict = TypeAliasType( @@ -25,7 +25,7 @@ class ToolExecutionDeltaEventTypedDict(TypedDict): id: str name: ToolExecutionDeltaEventNameTypedDict arguments: str - type: NotRequired[ToolExecutionDeltaEventType] + type: Literal["tool.execution.delta"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -37,7 +37,13 @@ class ToolExecutionDeltaEvent(BaseModel): arguments: str - type: Optional[ToolExecutionDeltaEventType] = "tool.execution.delta" + TYPE: Annotated[ + Annotated[ + Literal["tool.execution.delta"], + AfterValidator(validate_const("tool.execution.delta")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.delta" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py index 854baee9..88aa5124 100644 --- a/src/mistralai/client/models/toolexecutiondoneevent.py +++ b/src/mistralai/client/models/toolexecutiondoneevent.py @@ -4,11 +4,11 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionDoneEventType = Literal["tool.execution.done",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolExecutionDoneEventNameTypedDict = TypeAliasType( @@ -24,7 +24,7 @@ class ToolExecutionDoneEventTypedDict(TypedDict): id: str name: ToolExecutionDoneEventNameTypedDict - type: NotRequired[ToolExecutionDoneEventType] + type: Literal["tool.execution.done"] created_at: NotRequired[datetime] output_index: NotRequired[int] info: NotRequired[Dict[str, Any]] @@ -35,7 +35,13 @@ class ToolExecutionDoneEvent(BaseModel): name: ToolExecutionDoneEventName - type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" + TYPE: Annotated[ + Annotated[ + Literal["tool.execution.done"], + AfterValidator(validate_const("tool.execution.done")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.done" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py index 
839709fb..530c9029 100644 --- a/src/mistralai/client/models/toolexecutionentry.py +++ b/src/mistralai/client/models/toolexecutionentry.py @@ -21,14 +21,18 @@ ToolExecutionEntryType = Literal["tool.execution",] -NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str]) +ToolExecutionEntryNameTypedDict = TypeAliasType( + "ToolExecutionEntryNameTypedDict", Union[BuiltInConnectors, str] +) -Name = TypeAliasType("Name", Union[BuiltInConnectors, str]) +ToolExecutionEntryName = TypeAliasType( + "ToolExecutionEntryName", Union[BuiltInConnectors, str] +) class ToolExecutionEntryTypedDict(TypedDict): - name: NameTypedDict + name: ToolExecutionEntryNameTypedDict arguments: str object: NotRequired[ToolExecutionEntryObject] type: NotRequired[ToolExecutionEntryType] @@ -39,7 +43,7 @@ class ToolExecutionEntryTypedDict(TypedDict): class ToolExecutionEntry(BaseModel): - name: Name + name: ToolExecutionEntryName arguments: str diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py index 66438cfc..3d5f49c7 100644 --- a/src/mistralai/client/models/toolexecutionstartedevent.py +++ b/src/mistralai/client/models/toolexecutionstartedevent.py @@ -4,11 +4,11 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionStartedEventType = Literal["tool.execution.started",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolExecutionStartedEventNameTypedDict = TypeAliasType( @@ -25,7 +25,7 @@ class ToolExecutionStartedEventTypedDict(TypedDict): id: str name: ToolExecutionStartedEventNameTypedDict arguments: str - type: NotRequired[ToolExecutionStartedEventType] + type: Literal["tool.execution.started"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -37,7 +37,13 @@ class ToolExecutionStartedEvent(BaseModel): arguments: str - type: Optional[ToolExecutionStartedEventType] = "tool.execution.started" + TYPE: Annotated[ + Annotated[ + Literal["tool.execution.started"], + AfterValidator(validate_const("tool.execution.started")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.started" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/toolmessage.py b/src/mistralai/client/models/toolmessage.py index eae2d2ae..44fe63e7 100644 --- a/src/mistralai/client/models/toolmessage.py +++ b/src/mistralai/client/models/toolmessage.py @@ -9,9 +9,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolMessageContentTypedDict = TypeAliasType( @@ -22,14 +25,11 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool",] - - class ToolMessageTypedDict(TypedDict): content: Nullable[ToolMessageContentTypedDict] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - role: 
NotRequired[ToolMessageRole] + role: Literal["tool"] class ToolMessage(BaseModel): @@ -39,11 +39,14 @@ class ToolMessage(BaseModel): name: OptionalNullable[str] = UNSET - role: Optional[ToolMessageRole] = "tool" + ROLE: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] + optional_fields = ["tool_call_id", "name"] nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] diff --git a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py index c89d84fc..25e859e5 100644 --- a/src/mistralai/client/models/transcriptionsegmentchunk.py +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -14,7 +14,7 @@ from typing_extensions import NotRequired, TypedDict -Type = Literal["transcription_segment",] +TranscriptionSegmentChunkType = Literal["transcription_segment",] class TranscriptionSegmentChunkTypedDict(TypedDict): @@ -23,7 +23,7 @@ class TranscriptionSegmentChunkTypedDict(TypedDict): end: float score: NotRequired[Nullable[float]] speaker_id: NotRequired[Nullable[str]] - type: NotRequired[Type] + type: NotRequired[TranscriptionSegmentChunkType] class TranscriptionSegmentChunk(BaseModel): @@ -42,7 +42,7 @@ class TranscriptionSegmentChunk(BaseModel): speaker_id: OptionalNullable[str] = UNSET - type: Optional[Type] = "transcription_segment" + type: Optional[TranscriptionSegmentChunkType] = "transcription_segment" @property def additional_properties(self): diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py index add17f56..9ba2aeff 100644 --- a/src/mistralai/client/models/transcriptionstreamdone.py +++ b/src/mistralai/client/models/transcriptionstreamdone.py @@ -7,13 +7,12 @@ ) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamDoneType = Literal["transcription.done",] +from typing_extensions import Annotated, NotRequired, TypedDict class TranscriptionStreamDoneTypedDict(TypedDict): @@ -22,7 +21,7 @@ class TranscriptionStreamDoneTypedDict(TypedDict): usage: UsageInfoTypedDict language: Nullable[str] segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] - type: NotRequired[TranscriptionStreamDoneType] + type: Literal["transcription.done"] class TranscriptionStreamDone(BaseModel): @@ -41,7 +40,13 @@ class TranscriptionStreamDone(BaseModel): segments: Optional[List[TranscriptionSegmentChunk]] = None - type: Optional[TranscriptionStreamDoneType] = "transcription.done" + TYPE: Annotated[ + Annotated[ + Literal["transcription.done"], + AfterValidator(validate_const("transcription.done")), + ], + pydantic.Field(alias="type"), + ] = "transcription.done" @property def additional_properties(self): @@ -53,7 +58,7 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["segments", "type"] + optional_fields = ["segments"] nullable_fields = ["language"] null_default_fields = [] diff --git 
a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py index caaf943a..63a08fb5 100644 --- a/src/mistralai/client/models/transcriptionstreamevents.py +++ b/src/mistralai/client/models/transcriptionstreamevents.py @@ -19,8 +19,7 @@ TranscriptionStreamTextDeltaTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -38,12 +37,12 @@ TranscriptionStreamEventsData = Annotated[ Union[ - Annotated[TranscriptionStreamDone, Tag("transcription.done")], - Annotated[TranscriptionStreamLanguage, Tag("transcription.language")], - Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")], - Annotated[TranscriptionStreamTextDelta, Tag("transcription.text.delta")], + TranscriptionStreamDone, + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] diff --git a/src/mistralai/client/models/transcriptionstreameventtypes.py b/src/mistralai/client/models/transcriptionstreameventtypes.py index 4a910f0a..cb6b2889 100644 --- a/src/mistralai/client/models/transcriptionstreameventtypes.py +++ b/src/mistralai/client/models/transcriptionstreameventtypes.py @@ -1,12 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -TranscriptionStreamEventTypes = Literal[ - "transcription.language", - "transcription.segment", - "transcription.text.delta", - "transcription.done", +TranscriptionStreamEventTypes = Union[ + Literal[ + "transcription.language", + "transcription.segment", + "transcription.text.delta", + "transcription.done", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/transcriptionstreamlanguage.py b/src/mistralai/client/models/transcriptionstreamlanguage.py index b47024ad..244103be 100644 --- a/src/mistralai/client/models/transcriptionstreamlanguage.py +++ b/src/mistralai/client/models/transcriptionstreamlanguage.py @@ -2,18 +2,17 @@ from __future__ import annotations from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamLanguageType = Literal["transcription.language",] +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, TypedDict class TranscriptionStreamLanguageTypedDict(TypedDict): audio_language: str - type: NotRequired[TranscriptionStreamLanguageType] + type: Literal["transcription.language"] class TranscriptionStreamLanguage(BaseModel): @@ -24,7 +23,13 @@ class TranscriptionStreamLanguage(BaseModel): audio_language: str - type: Optional[TranscriptionStreamLanguageType] = "transcription.language" + TYPE: Annotated[ + Annotated[ + Literal["transcription.language"], + AfterValidator(validate_const("transcription.language")), + ], + pydantic.Field(alias="type"), + ] = "transcription.language" @property def additional_properties(self):
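Note how `TranscriptionStreamEventsData` above drops the callable `Discriminator`/`Tag` machinery in favor of pydantic's built-in tagged unions, keyed on the const `TYPE` field each member now declares. A rough standalone sketch of that dispatch, using illustrative model names rather than the SDK's own, and assuming pydantic resolves the discriminator through the field's `"type"` alias (which is what the generated models rely on):

```python
from typing import Literal, Union

import pydantic
from typing_extensions import Annotated


class DoneSketch(pydantic.BaseModel):
    TYPE: Annotated[
        Literal["transcription.done"], pydantic.Field(alias="type")
    ] = "transcription.done"


class LanguageSketch(pydantic.BaseModel):
    TYPE: Annotated[
        Literal["transcription.language"], pydantic.Field(alias="type")
    ] = "transcription.language"
    audio_language: str


# Field(discriminator="TYPE") names the model field; on dict/JSON input the
# wire key is the field's alias, "type".
EventSketch = Annotated[
    Union[DoneSketch, LanguageSketch], pydantic.Field(discriminator="TYPE")
]

adapter = pydantic.TypeAdapter(EventSketch)
event = adapter.validate_python(
    {"type": "transcription.language", "audio_language": "en"}
)
print(type(event).__name__)  # LanguageSketch
```

Because the tag is read from a declared `Literal` field rather than a lambda, the union stays introspectable, and the diffs above no longer need to import the `get_discriminator` helper.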
diff --git a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py index 7cfffb63..ee014742 100644 --- a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -8,13 +8,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamSegmentDeltaType = Literal["transcription.segment",] +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, NotRequired, TypedDict class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): @@ -22,7 +21,7 @@ class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): start: float end: float speaker_id: NotRequired[Nullable[str]] - type: NotRequired[TranscriptionStreamSegmentDeltaType] + type: Literal["transcription.segment"] class TranscriptionStreamSegmentDelta(BaseModel): @@ -39,7 +38,13 @@ class TranscriptionStreamSegmentDelta(BaseModel): speaker_id: OptionalNullable[str] = UNSET - type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" + TYPE: Annotated[ + Annotated[ + Literal["transcription.segment"], + AfterValidator(validate_const("transcription.segment")), + ], + pydantic.Field(alias="type"), + ] = "transcription.segment" @property def additional_properties(self): @@ -51,7 +56,7 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["speaker_id", "type"] + optional_fields = ["speaker_id"] nullable_fields = ["speaker_id"] null_default_fields = [] diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py b/src/mistralai/client/models/transcriptionstreamtextdelta.py index ce279cf6..feb459ea 100644 --- a/src/mistralai/client/models/transcriptionstreamtextdelta.py +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -2,18 +2,17 @@ from __future__ import annotations from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamTextDeltaType = Literal["transcription.text.delta",] +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, TypedDict class TranscriptionStreamTextDeltaTypedDict(TypedDict): text: str - type: NotRequired[TranscriptionStreamTextDeltaType] + type: Literal["transcription.text.delta"] class TranscriptionStreamTextDelta(BaseModel): @@ -24,7 +23,13 @@ class TranscriptionStreamTextDelta(BaseModel): text: str - type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta" + TYPE: Annotated[ + Annotated[ + Literal["transcription.text.delta"], + AfterValidator(validate_const("transcription.text.delta")), + ], + pydantic.Field(alias="type"), + ] = "transcription.text.delta" @property def additional_properties(self): diff --git a/src/mistralai/client/models/usermessage.py b/src/mistralai/client/models/usermessage.py index 8d92cea8..fe64a8cc 100644 --- a/src/mistralai/client/models/usermessage.py +++ b/src/mistralai/client/models/usermessage.py @@ -3,9 +3,12 @@ from __future__ import
annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict UserMessageContentTypedDict = TypeAliasType( @@ -16,22 +19,22 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user",] - - class UserMessageTypedDict(TypedDict): content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] + role: Literal["user"] class UserMessage(BaseModel): content: Nullable[UserMessageContent] - role: Optional[UserMessageRole] = "user" + ROLE: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role"] + optional_fields = [] nullable_fields = ["content"] null_default_fields = [] diff --git a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py index 89489fb4..18e32ac3 100644 --- a/src/mistralai/client/models/wandbintegration.py +++ b/src/mistralai/client/models/wandbintegration.py @@ -8,12 +8,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WandbIntegrationType = Literal["wandb",] +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict class WandbIntegrationTypedDict(TypedDict): @@ -21,7 +21,7 @@ class WandbIntegrationTypedDict(TypedDict): r"""The name of the project that the new run will be created under.""" api_key: str r"""The WandB API key to use for authentication.""" - type: NotRequired[WandbIntegrationType] + type: Literal["wandb"] name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -34,7 +34,10 @@ class WandbIntegration(BaseModel): api_key: str r"""The WandB API key to use for authentication.""" - type: Optional[WandbIntegrationType] = "wandb" + TYPE: Annotated[ + Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], + pydantic.Field(alias="type"), + ] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" @@ -43,7 +46,7 @@ class WandbIntegration(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name"] + optional_fields = ["name", "run_name"] nullable_fields = ["name", "run_name"] null_default_fields = [] diff --git a/src/mistralai/client/models/wandbintegrationout.py b/src/mistralai/client/models/wandbintegrationout.py index a7f9afeb..6409f4a4 100644 --- a/src/mistralai/client/models/wandbintegrationout.py +++ b/src/mistralai/client/models/wandbintegrationout.py @@ -8,18 +8,18 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WandbIntegrationOutType = Literal["wandb",] +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" - type: NotRequired[WandbIntegrationOutType] + type: Literal["wandb"] name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -30,7 +30,10 @@ class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - type: Optional[WandbIntegrationOutType] = "wandb" + TYPE: Annotated[ + Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], + pydantic.Field(alias="type"), + ] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" @@ -41,7 +44,7 @@ class WandbIntegrationOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name", "url"] + optional_fields = ["name", "run_name", "url"] nullable_fields = ["name", "run_name", "url"] null_default_fields = [] diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py index 8d2d4b5d..c7825ec3 100644 --- a/src/mistralai/client/models/websearchpremiumtool.py +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -2,16 +2,22 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WebSearchPremiumToolType = Literal["web_search_premium",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class WebSearchPremiumToolTypedDict(TypedDict): - type: NotRequired[WebSearchPremiumToolType] + type: Literal["web_search_premium"] class WebSearchPremiumTool(BaseModel): - type: Optional[WebSearchPremiumToolType] = "web_search_premium" + TYPE: Annotated[ + Annotated[ + Literal["web_search_premium"], + AfterValidator(validate_const("web_search_premium")), + ], + pydantic.Field(alias="type"), + ] = "web_search_premium" diff --git a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py index ba4cc09f..7a237d86 100644 --- a/src/mistralai/client/models/websearchtool.py +++ b/src/mistralai/client/models/websearchtool.py @@ -2,16 +2,19 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WebSearchToolType = Literal["web_search",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class WebSearchToolTypedDict(TypedDict): - type: NotRequired[WebSearchToolType] + type: Literal["web_search"] class WebSearchTool(BaseModel): - type: Optional[WebSearchToolType] = "web_search" + TYPE: Annotated[ + Annotated[Literal["web_search"], AfterValidator(validate_const("web_search"))], + pydantic.Field(alias="type"), + ] = "web_search" diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py index 5ef9da09..00708197 100644 --- a/src/mistralai/client/models_.py +++ b/src/mistralai/client/models_.py @@ -174,7 +174,7 @@ def retrieve( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + ) -> models.ResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model Retrieve information about a model. 
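The `WebSearchTool` diff a few hunks up is the smallest instance of the const-field rewrite applied across this patch: optional `type`/`role` markers become required, alias-mapped `TYPE`/`ROLE` fields pinned to a single value. A minimal sketch of the resulting behavior; `validate_const` below is a local stand-in, since the real helper in `mistralai.client.utils` is not shown in this diff:

```python
from typing import Literal

import pydantic
from pydantic.functional_validators import AfterValidator
from typing_extensions import Annotated


def validate_const(expected):
    # Assumed behavior of mistralai.client.utils.validate_const: reject any
    # value other than the pinned constant.
    def check(value):
        if value != expected:
            raise ValueError(f"expected {expected!r}, got {value!r}")
        return value

    return check


class WebSearchToolSketch(pydantic.BaseModel):
    TYPE: Annotated[
        Annotated[Literal["web_search"], AfterValidator(validate_const("web_search"))],
        pydantic.Field(alias="type"),
    ] = "web_search"


tool = WebSearchToolSketch()  # the constant is filled in automatically
print(tool.model_dump(by_alias=True))  # {'type': 'web_search'}
WebSearchToolSketch.model_validate({"type": "web_search"})  # round-trips cleanly
```

This is also why the serializers above shrink their `optional_fields` lists, and why, further down, `create_tool_call` stops passing `type="function"` by hand: the constant is now always present and always emitted.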
@@ -242,8 +242,7 @@ def retrieve( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - http_res, + models.ResponseRetrieveModelV1ModelsModelIDGet, http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( @@ -267,7 +266,7 @@ async def retrieve_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + ) -> models.ResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model Retrieve information about a model. @@ -335,8 +334,7 @@ async def retrieve_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - http_res, + models.ResponseRetrieveModelV1ModelsModelIDGet, http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py index 1048c54f..115eff61 100644 --- a/src/mistralai/extra/mcp/base.py +++ b/src/mistralai/extra/mcp/base.py @@ -84,7 +84,6 @@ async def get_tools(self) -> list[FunctionTool]: for mcp_tool in mcp_tools.tools: tools.append( FunctionTool( - type="function", function=Function( name=mcp_tool.name, description=mcp_tool.description, diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py index 8e570e41..01baa6a9 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -22,18 +22,19 @@ create_tool_call, ) from mistralai.client.models import ( + AgentTool, CompletionArgs, CompletionArgsTypedDict, ConversationInputs, ConversationInputsTypedDict, + ConversationRequestTool, + ConversationRequestToolTypedDict, FunctionCallEntry, FunctionResultEntry, FunctionTool, InputEntries, MessageInputEntry, ResponseFormat, - Tools, - ToolsTypedDict, ) from mistralai.client.types.basemodel import BaseModel, OptionalNullable, UNSET @@ -50,7 +51,7 @@ class AgentRequestKwargs(typing.TypedDict): class ModelRequestKwargs(typing.TypedDict): model: str instructions: OptionalNullable[str] - tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] @@ -186,10 +187,9 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs ) agent = await beta_client.agents.get_async(agent_id=self.agent_id) agent_tools = agent.tools or [] - updated_tools = [] - for i in range(len(agent_tools)): - tool = agent_tools[i] - if tool.type != "function": + updated_tools: list[AgentTool] = [] + for tool in agent_tools: + if not isinstance(tool, FunctionTool): updated_tools.append(tool) elif tool.function.name in self._callable_tools: # function already exists in the agent, don't add it again @@ -209,7 +209,7 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs async def prepare_model_request( self, - tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] = UNSET, + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] = UNSET, completion_args: 
OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, instructions: OptionalNullable[str] = None, ) -> ModelRequestKwargs: @@ -225,7 +225,7 @@ async def prepare_model_request( request_tools = [] if isinstance(tools, list): for tool in tools: - request_tools.append(typing.cast(Tools, tool)) + request_tools.append(typing.cast(ConversationRequestTool, tool)) for tool in self.get_tools(): request_tools.append(tool) return ModelRequestKwargs( @@ -248,7 +248,7 @@ async def _validate_run( run_ctx: RunContext, inputs: ConversationInputs | ConversationInputsTypedDict, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] = UNSET, + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] = UNSET, completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, ) -> tuple[ AgentRequestKwargs | ModelRequestKwargs, RunResult, list[InputEntries] diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py index 94ef2852..18c1d3dd 100644 --- a/src/mistralai/extra/run/tools.py +++ b/src/mistralai/extra/run/tools.py @@ -168,7 +168,6 @@ def create_tool_call(func: Callable) -> FunctionTool: type_hints = get_type_hints(func, include_extras=True, localns=None, globalns=None) return FunctionTool( - type="function", function=Function( name=name, description=_get_function_description(docstring_sections), diff --git a/uv.lock b/uv.lock index 4b1890b2..caa731ed 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "2.0.0a1" +version = "2.0.0a2" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, From 346dc75a6a9db5542b3bd93ffce505b5223b560c Mon Sep 17 00:00:00 2001 From: Nelson PROIA <144663685+Nelson-PROIA@users.noreply.github.com> Date: Fri, 13 Feb 2026 10:46:17 +0100 Subject: [PATCH 24/42] PEP 420 namespace support, widen otel upper bound (#346) PEP 420 namespace support, widen otel upper bound - Widen opentelemetry-semantic-conventions upper bound from <0.60 to <0.61 (fixes #341) - Add "Additional packages" section to README for mistralai-* namespace packages Co-Authored-By: Claude Opus 4.6 --- README.md | 12 ++++++++++-- pyproject.toml | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2f31ccf2..04cb586c 100644 --- a/README.md +++ b/README.md @@ -118,7 +118,7 @@ Once that is saved to a file, you can run it with `uv run script.py` where ### Agents extra dependencies -When using the agents related feature it is required to add the `agents` extra dependencies. This can be added when +When using the agents-related features, you must install the `agents` extra dependencies. This can be done when installing the package: ```bash pip install "mistralai[agents]" ``` > Note: These features require Python 3.10+ (the SDK minimum). +### Additional packages + +Additional `mistralai-*` packages (e.g.
`mistralai-workflows`) can be installed separately and are available under the `mistralai` namespace: + +```bash +pip install mistralai-workflows +``` + ## SDK Example Usage @@ -410,7 +418,7 @@ gcloud auth application-default login Install the extras dependencies specific to Google Cloud: ```bash -pip install mistralai[gcp] +pip install "mistralai[gcp]" ``` **Step 2: Example Usage** diff --git a/pyproject.toml b/pyproject.toml index 7209c64c..5802feaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ "opentelemetry-sdk (>=1.33.1,<2.0.0)", "opentelemetry-api (>=1.33.1,<2.0.0)", "opentelemetry-exporter-otlp-proto-http (>=1.37.0,<2.0.0)", - "opentelemetry-semantic-conventions (>=0.59b0,<0.60)", + "opentelemetry-semantic-conventions (>=0.59b0,<0.61)", ] [project.optional-dependencies] From fc41a0759cab147c99238ccfe94eac8affb54e06 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Tue, 17 Feb 2026 15:17:20 +0100 Subject: [PATCH 25/42] chore: switch to v2 specs for shorter SDK class names (#352) * chore: add Makefile for SDK generation commands - test-generate: Test SDK generation locally - update-speakeasy-version: Update Speakeasy CLI version Production generation is done via GitHub Actions. * chore: switch SDK generation to v2 specs for shorter class names Update Speakeasy workflow to pull from :v2 registry tags instead of :main. This enables shorter SDK class names via x-mistral-sdk-operation-id. * chore: regenerate SDK with v2 specs (shorter class names) Auto-generated by Speakeasy using v2 spec with short operation IDs. Class names now use concise names like ArchiveModelRequest instead of JobsAPIRoutesFineTuningArchiveFineTunedModelRequest. * chore: bump version to 2.0.0a3 * chore: regenerate SDK for v2.0.0a3 * docs: add shorter class names section to migration guide * fix(examples): use mistral-small-latest for fine-tuning examples * ci: skip fine-tuning job examples (CI API key lacks access) * refactor(examples): rename job examples to clarify fine-tuning vs batch --- .speakeasy/gen.lock | 2892 ++++++++++------- .speakeasy/gen.yaml | 5 +- ...-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock | 799 ----- .speakeasy/workflow.lock | 36 +- .speakeasy/workflow.yaml | 6 +- MIGRATION.md | 13 + Makefile | 24 + README.md | 18 +- docs/models/agent.md | 3 +- docs/models/agentcreationrequest.md | 3 +- docs/models/agentsapiv1agentsgetrequest.md | 9 - docs/models/agentupdaterequest.md | 3 +- ...equest.md => appendconversationrequest.md} | 2 +- ....md => appendconversationstreamrequest.md} | 2 +- docs/models/archiveftmodelout.md | 10 +- docs/models/archiveftmodeloutobject.md | 8 - ...modelrequest.md => archivemodelrequest.md} | 2 +- docs/models/batchjobout.md | 42 +- docs/models/batchjoboutobject.md | 8 - docs/models/batchjobsout.md | 10 +- docs/models/batchjobsoutobject.md | 8 - ...jobrequest.md => cancelbatchjobrequest.md} | 2 +- ...quest.md => cancelfinetuningjobrequest.md} | 2 +- ...onse.md => cancelfinetuningjobresponse.md} | 2 +- docs/models/classifierdetailedjobout.md | 4 +- docs/models/classifierdetailedjoboutobject.md | 8 - docs/models/classifierftmodelout.md | 36 +- docs/models/classifierftmodeloutobject.md | 8 - docs/models/classifierjobout.md | 4 +- docs/models/classifierjoboutobject.md | 10 - docs/models/completiondetailedjobout.md | 4 +- docs/models/completiondetailedjoboutobject.md | 8 - docs/models/completionftmodelout.md | 34 +- docs/models/completionftmodeloutobject.md | 8 - docs/models/completionjobout.md | 4 
+- docs/models/completionjoboutobject.md | 10 - ...onse.md => createfinetuningjobresponse.md} | 2 +- ....md => createorupdateagentaliasrequest.md} | 2 +- docs/models/deleteagentaliasrequest.md | 9 + ...deleterequest.md => deleteagentrequest.md} | 2 +- ...equest.md => deleteconversationrequest.md} | 2 +- ...tv1request.md => deletedocumentrequest.md} | 2 +- ...etefilerequest.md => deletefilerequest.md} | 2 +- ...quest.md => deletelibraryaccessrequest.md} | 2 +- ...etv1request.md => deletelibraryrequest.md} | 2 +- ...deleterequest.md => deletemodelrequest.md} | 2 +- ...dfilerequest.md => downloadfilerequest.md} | 2 +- ...gentversion.md => getagentagentversion.md} | 2 +- docs/models/getagentrequest.md | 9 + ...onrequest.md => getagentversionrequest.md} | 2 +- ...tchjobrequest.md => getbatchjobrequest.md} | 2 +- ...st.md => getconversationhistoryrequest.md} | 2 +- ...t.md => getconversationmessagesrequest.md} | 2 +- ...terequest.md => getconversationrequest.md} | 2 +- ...tdocumentextractedtextsignedurlrequest.md} | 2 +- ...letev1request.md => getdocumentrequest.md} | 2 +- ...uest.md => getdocumentsignedurlrequest.md} | 2 +- ...request.md => getdocumentstatusrequest.md} | 2 +- docs/models/getdocumenttextcontentrequest.md | 9 + ...lrequest.md => getfilesignedurlrequest.md} | 2 +- ...brequest.md => getfinetuningjobrequest.md} | 2 +- ...esponse.md => getfinetuningjobresponse.md} | 2 +- ...eletev1request.md => getlibraryrequest.md} | 2 +- docs/models/jobin.md | 2 +- ...outesfinetuninggetfinetuningjobsrequest.md | 17 - docs/models/jobsout.md | 10 +- docs/models/jobsoutobject.md | 8 - docs/models/legacyjobmetadataout.md | 2 +- docs/models/legacyjobmetadataoutobject.md | 8 - ...mentsgetextractedtextsignedurlv1request.md | 9 - ...brariesdocumentsgettextcontentv1request.md | 9 - ...srequest.md => listagentaliasesrequest.md} | 2 +- ...ntslistrequest.md => listagentsrequest.md} | 5 +- ...request.md => listagentversionsrequest.md} | 2 +- ...jobsrequest.md => listbatchjobsrequest.md} | 5 +- ...request.md => listconversationsrequest.md} | 2 +- ...sponse.md => listconversationsresponse.md} | 2 +- ...stv1request.md => listdocumentsrequest.md} | 2 +- ...istfilesrequest.md => listfilesrequest.md} | 2 +- docs/models/listfinetuningjobsrequest.md | 17 + ...sstatus.md => listfinetuningjobsstatus.md} | 2 +- ...quest.md => listlibraryaccessesrequest.md} | 2 +- docs/models/orderby.md | 9 + docs/models/reprocessdocumentrequest.md | 9 + ...quest.md => restartconversationrequest.md} | 2 +- ...md => restartconversationstreamrequest.md} | 2 +- ...efilerequest.md => retrievefilerequest.md} | 2 +- ...dgetrequest.md => retrievemodelrequest.md} | 2 +- ...equest.md => startfinetuningjobrequest.md} | 2 +- ...ponse.md => startfinetuningjobresponse.md} | 2 +- docs/models/unarchiveftmodelout.md | 10 +- docs/models/unarchiveftmodeloutobject.md | 8 - ...delrequest.md => unarchivemodelrequest.md} | 2 +- ...updaterequest.md => updateagentrequest.md} | 2 +- ...equest.md => updateagentversionrequest.md} | 2 +- ...ev1request.md => updatedocumentrequest.md} | 2 +- ...tev1request.md => updatelibraryrequest.md} | 2 +- ...dmodelrequest.md => updatemodelrequest.md} | 2 +- ...odelresponse.md => updatemodelresponse.md} | 2 +- ... 
=> updateorcreatelibraryaccessrequest.md} | 2 +- ...dv1request.md => uploaddocumentrequest.md} | 2 +- docs/sdks/accesses/README.md | 12 +- docs/sdks/batchjobs/README.md | 17 +- docs/sdks/betaagents/README.md | 81 +- docs/sdks/conversations/README.md | 43 +- docs/sdks/documents/README.md | 40 +- docs/sdks/files/README.md | 20 +- docs/sdks/finetuningjobs/README.md | 54 +- docs/sdks/libraries/README.md | 16 +- docs/sdks/models/README.md | 14 +- .../{async_jobs.py => async_fine_tuning.py} | 2 +- ...jobs_chat.py => async_fine_tuning_chat.py} | 2 +- .../mistral/jobs/{jobs.py => fine_tuning.py} | 2 +- ...{dry_run_job.py => fine_tuning_dry_run.py} | 2 +- packages/mistralai_azure/.speakeasy/gen.lock | 28 +- .../docs/models/mistralpromptmode.md | 4 + .../mistralai_azure/docs/models/ocrrequest.md | 1 + .../models/mistralpromptmode.py | 4 + .../src/mistralai_azure/models/ocrrequest.py | 7 + .../src/mistralai_azure/ocr.py | 6 + packages/mistralai_gcp/.speakeasy/gen.lock | 16 +- .../docs/models/mistralpromptmode.md | 4 + .../mistralai_gcp/models/mistralpromptmode.py | 4 + pyproject.toml | 2 +- scripts/run_examples.sh | 5 +- src/mistralai/client/__init__.py | 1 + src/mistralai/client/_hooks/__init__.py | 1 + src/mistralai/client/_hooks/sdkhooks.py | 1 + src/mistralai/client/_hooks/types.py | 1 + src/mistralai/client/_version.py | 5 +- src/mistralai/client/accesses.py | 25 +- src/mistralai/client/agents.py | 1 + src/mistralai/client/audio.py | 1 + src/mistralai/client/basesdk.py | 1 + src/mistralai/client/batch.py | 1 + src/mistralai/client/batch_jobs.py | 36 +- src/mistralai/client/beta.py | 1 + src/mistralai/client/beta_agents.py | 295 +- src/mistralai/client/chat.py | 1 + src/mistralai/client/classifiers.py | 1 + src/mistralai/client/conversations.py | 99 +- src/mistralai/client/documents.py | 81 +- src/mistralai/client/embeddings.py | 1 + src/mistralai/client/files.py | 45 +- src/mistralai/client/fim.py | 1 + src/mistralai/client/fine_tuning.py | 1 + src/mistralai/client/fine_tuning_jobs.py | 95 +- src/mistralai/client/httpclient.py | 1 + src/mistralai/client/libraries.py | 33 +- src/mistralai/client/models/__init__.py | 951 +++--- src/mistralai/client/models/agent.py | 13 +- .../client/models/agentaliasresponse.py | 1 + .../client/models/agentconversation.py | 1 + .../client/models/agentcreationrequest.py | 13 +- .../client/models/agenthandoffdoneevent.py | 1 + .../client/models/agenthandoffentry.py | 1 + .../client/models/agenthandoffstartedevent.py | 1 + .../client/models/agentscompletionrequest.py | 1 + .../models/agentscompletionstreamrequest.py | 1 + .../client/models/agentupdaterequest.py | 6 + src/mistralai/client/models/apiendpoint.py | 1 + ...ns_appendop.py => appendconversationop.py} | 5 +- ...eamop.py => appendconversationstreamop.py} | 5 +- .../client/models/archiveftmodelout.py | 16 +- ...ine_tuned_modelop.py => archivemodelop.py} | 5 +- .../client/models/assistantmessage.py | 1 + src/mistralai/client/models/audiochunk.py | 1 + src/mistralai/client/models/audioencoding.py | 1 + src/mistralai/client/models/audioformat.py | 1 + .../models/audiotranscriptionrequest.py | 1 + .../models/audiotranscriptionrequeststream.py | 1 + src/mistralai/client/models/basemodelcard.py | 1 + src/mistralai/client/models/batcherror.py | 1 + src/mistralai/client/models/batchjobin.py | 1 + src/mistralai/client/models/batchjobout.py | 16 +- src/mistralai/client/models/batchjobsout.py | 16 +- src/mistralai/client/models/batchjobstatus.py | 1 + src/mistralai/client/models/batchrequest.py | 1 + 
.../client/models/builtinconnectors.py | 1 + ...cel_batch_jobop.py => cancelbatchjobop.py} | 5 +- ...ning_jobop.py => cancelfinetuningjobop.py} | 11 +- .../models/chatclassificationrequest.py | 1 + .../client/models/chatcompletionchoice.py | 1 + .../client/models/chatcompletionrequest.py | 1 + .../client/models/chatcompletionresponse.py | 1 + .../models/chatcompletionstreamrequest.py | 1 + .../client/models/chatmoderationrequest.py | 1 + src/mistralai/client/models/checkpointout.py | 1 + .../client/models/classificationrequest.py | 1 + .../client/models/classificationresponse.py | 1 + .../models/classificationtargetresult.py | 1 + .../client/models/classifierdetailedjobout.py | 13 +- .../client/models/classifierftmodelout.py | 11 +- .../client/models/classifierjobout.py | 14 +- .../client/models/classifiertargetin.py | 1 + .../client/models/classifiertargetout.py | 1 + .../models/classifiertrainingparameters.py | 1 + .../models/classifiertrainingparametersin.py | 1 + .../client/models/codeinterpretertool.py | 1 + src/mistralai/client/models/completionargs.py | 1 + .../client/models/completionargsstop.py | 1 + .../client/models/completionchunk.py | 1 + .../client/models/completiondetailedjobout.py | 13 +- .../client/models/completionevent.py | 1 + .../client/models/completionftmodelout.py | 11 +- .../client/models/completionjobout.py | 14 +- .../models/completionresponsestreamchoice.py | 1 + .../models/completiontrainingparameters.py | 1 + .../models/completiontrainingparametersin.py | 1 + src/mistralai/client/models/contentchunk.py | 1 + .../models/conversationappendrequest.py | 1 + .../models/conversationappendstreamrequest.py | 1 + .../client/models/conversationevents.py | 1 + .../client/models/conversationhistory.py | 1 + .../client/models/conversationinputs.py | 1 + .../client/models/conversationmessages.py | 1 + .../client/models/conversationrequest.py | 1 + .../client/models/conversationresponse.py | 1 + .../models/conversationrestartrequest.py | 1 + .../conversationrestartstreamrequest.py | 1 + .../models/conversationstreamrequest.py | 1 + .../client/models/conversationusageinfo.py | 1 + ...ning_jobop.py => createfinetuningjobop.py} | 10 +- ...iasop.py => createorupdateagentaliasop.py} | 5 +- .../client/models/deleteagentaliasop.py | 22 + ...v1_agents_deleteop.py => deleteagentop.py} | 5 +- ...ns_deleteop.py => deleteconversationop.py} | 5 +- ..._reprocess_v1op.py => deletedocumentop.py} | 5 +- ...outes_delete_fileop.py => deletefileop.py} | 5 +- src/mistralai/client/models/deletefileout.py | 1 + ...elete_v1op.py => deletelibraryaccessop.py} | 5 +- ...braries_get_v1op.py => deletelibraryop.py} | 5 +- ..._model_id_deleteop.py => deletemodelop.py} | 5 +- src/mistralai/client/models/deletemodelout.py | 1 + src/mistralai/client/models/deltamessage.py | 1 + .../client/models/documentlibrarytool.py | 1 + src/mistralai/client/models/documentout.py | 1 + .../client/models/documenttextcontent.py | 1 + .../client/models/documentupdatein.py | 1 + .../client/models/documenturlchunk.py | 1 + ...s_download_fileop.py => downloadfileop.py} | 5 +- src/mistralai/client/models/embeddingdtype.py | 1 + .../client/models/embeddingrequest.py | 1 + .../client/models/embeddingresponse.py | 1 + .../client/models/embeddingresponsedata.py | 1 + src/mistralai/client/models/encodingformat.py | 1 + src/mistralai/client/models/entitytype.py | 1 + src/mistralai/client/models/eventout.py | 1 + src/mistralai/client/models/file.py | 1 + src/mistralai/client/models/filechunk.py | 1 + 
src/mistralai/client/models/filepurpose.py | 1 + src/mistralai/client/models/fileschema.py | 1 + src/mistralai/client/models/filesignedurl.py | 1 + .../client/models/fimcompletionrequest.py | 1 + .../client/models/fimcompletionresponse.py | 1 + .../models/fimcompletionstreamrequest.py | 1 + .../client/models/finetuneablemodeltype.py | 1 + .../client/models/ftclassifierlossfunction.py | 1 + .../client/models/ftmodelcapabilitiesout.py | 1 + src/mistralai/client/models/ftmodelcard.py | 1 + src/mistralai/client/models/function.py | 1 + src/mistralai/client/models/functioncall.py | 1 + .../client/models/functioncallentry.py | 1 + .../models/functioncallentryarguments.py | 1 + .../client/models/functioncallevent.py | 1 + src/mistralai/client/models/functionname.py | 1 + .../client/models/functionresultentry.py | 1 + src/mistralai/client/models/functiontool.py | 1 + ...s_api_v1_agents_getop.py => getagentop.py} | 17 +- ..._get_versionop.py => getagentversionop.py} | 5 +- ...ch_get_batch_jobop.py => getbatchjobop.py} | 5 +- ...storyop.py => getconversationhistoryop.py} | 5 +- ...agesop.py => getconversationmessagesop.py} | 5 +- ...rsations_getop.py => getconversationop.py} | 5 +- ...=> getdocumentextractedtextsignedurlop.py} | 5 +- ...uments_delete_v1op.py => getdocumentop.py} | 5 +- ...atus_v1op.py => getdocumentsignedurlop.py} | 5 +- ...ned_url_v1op.py => getdocumentstatusop.py} | 5 +- ...nt_v1op.py => getdocumenttextcontentop.py} | 5 +- ..._signed_urlop.py => getfilesignedurlop.py} | 5 +- ..._tuning_jobop.py => getfinetuningjobop.py} | 11 +- ...braries_delete_v1op.py => getlibraryop.py} | 5 +- .../client/models/githubrepositoryin.py | 1 + .../client/models/githubrepositoryout.py | 1 + .../client/models/httpvalidationerror.py | 1 + .../client/models/imagegenerationtool.py | 1 + src/mistralai/client/models/imageurl.py | 1 + src/mistralai/client/models/imageurlchunk.py | 1 + src/mistralai/client/models/inputentries.py | 1 + src/mistralai/client/models/inputs.py | 1 + .../client/models/instructrequest.py | 1 + src/mistralai/client/models/jobin.py | 3 +- src/mistralai/client/models/jobmetadataout.py | 1 + src/mistralai/client/models/jobsout.py | 14 +- src/mistralai/client/models/jsonschema.py | 1 + .../client/models/legacyjobmetadataout.py | 19 +- src/mistralai/client/models/libraryin.py | 1 + .../client/models/libraryinupdate.py | 1 + src/mistralai/client/models/libraryout.py | 1 + ...ion_aliasesop.py => listagentaliasesop.py} | 5 +- ...pi_v1_agents_listop.py => listagentsop.py} | 25 +- ...t_versionsop.py => listagentversionsop.py} | 5 +- ...get_batch_jobsop.py => listbatchjobsop.py} | 20 +- ...tions_listop.py => listconversationsop.py} | 13 +- .../client/models/listdocumentout.py | 1 + ...uments_list_v1op.py => listdocumentsop.py} | 5 +- ..._routes_list_filesop.py => listfilesop.py} | 5 +- src/mistralai/client/models/listfilesout.py | 1 + ...ning_jobsop.py => listfinetuningjobsop.py} | 11 +- ..._list_v1op.py => listlibraryaccessesop.py} | 5 +- src/mistralai/client/models/listlibraryout.py | 1 + src/mistralai/client/models/listsharingout.py | 1 + src/mistralai/client/models/messageentries.py | 1 + .../models/messageinputcontentchunks.py | 1 + .../client/models/messageinputentry.py | 1 + .../models/messageoutputcontentchunks.py | 1 + .../client/models/messageoutputentry.py | 1 + .../client/models/messageoutputevent.py | 1 + src/mistralai/client/models/metricout.py | 1 + src/mistralai/client/models/mistralerror.py | 1 + .../client/models/mistralpromptmode.py | 1 + 
 .../client/models/modelcapabilities.py | 1 +
 .../client/models/modelconversation.py | 1 +
 src/mistralai/client/models/modellist.py | 1 +
 .../client/models/moderationobject.py | 1 +
 .../client/models/moderationresponse.py | 1 +
 .../client/models/no_response_error.py | 1 +
 src/mistralai/client/models/ocrimageobject.py | 1 +
 .../client/models/ocrpagedimensions.py | 1 +
 src/mistralai/client/models/ocrpageobject.py | 1 +
 src/mistralai/client/models/ocrrequest.py | 1 +
 src/mistralai/client/models/ocrresponse.py | 1 +
 src/mistralai/client/models/ocrtableobject.py | 1 +
 src/mistralai/client/models/ocrusageinfo.py | 1 +
 .../client/models/outputcontentchunks.py | 1 +
 src/mistralai/client/models/paginationinfo.py | 1 +
 src/mistralai/client/models/prediction.py | 1 +
 .../client/models/processingstatusout.py | 1 +
 .../models/realtimetranscriptionerror.py | 1 +
 .../realtimetranscriptionerrordetail.py | 1 +
 .../models/realtimetranscriptionsession.py | 1 +
 .../realtimetranscriptionsessioncreated.py | 1 +
 .../realtimetranscriptionsessionupdated.py | 1 +
 src/mistralai/client/models/referencechunk.py | 1 +
 ...nts_get_v1op.py => reprocessdocumentop.py} | 5 +-
 src/mistralai/client/models/requestsource.py | 1 +
 .../client/models/responsedoneevent.py | 1 +
 .../client/models/responseerrorevent.py | 1 +
 src/mistralai/client/models/responseformat.py | 1 +
 .../client/models/responseformats.py | 1 +
 .../client/models/responsestartedevent.py | 1 +
 .../client/models/responsevalidationerror.py | 1 +
 ..._restartop.py => restartconversationop.py} | 5 +-
 ...amop.py => restartconversationstreamop.py} | 5 +-
 ...s_retrieve_fileop.py => retrievefileop.py} | 5 +-
 .../client/models/retrievefileout.py | 1 +
 ...s_model_id_getop.py => retrievemodelop.py} | 5 +-
 src/mistralai/client/models/sampletype.py | 1 +
 src/mistralai/client/models/sdkerror.py | 1 +
 src/mistralai/client/models/security.py | 1 +
 src/mistralai/client/models/shareenum.py | 1 +
 src/mistralai/client/models/sharingdelete.py | 1 +
 src/mistralai/client/models/sharingin.py | 1 +
 src/mistralai/client/models/sharingout.py | 1 +
 src/mistralai/client/models/source.py | 1 +
 src/mistralai/client/models/ssetypes.py | 1 +
 ...uning_jobop.py => startfinetuningjobop.py} | 11 +-
 src/mistralai/client/models/systemmessage.py | 1 +
 .../models/systemmessagecontentchunks.py | 1 +
 src/mistralai/client/models/textchunk.py | 1 +
 src/mistralai/client/models/thinkchunk.py | 1 +
 .../client/models/timestampgranularity.py | 1 +
 src/mistralai/client/models/tool.py | 1 +
 src/mistralai/client/models/toolcall.py | 1 +
 src/mistralai/client/models/toolchoice.py | 1 +
 src/mistralai/client/models/toolchoiceenum.py | 1 +
 .../client/models/toolexecutiondeltaevent.py | 1 +
 .../client/models/toolexecutiondoneevent.py | 1 +
 .../client/models/toolexecutionentry.py | 1 +
 .../models/toolexecutionstartedevent.py | 1 +
 src/mistralai/client/models/toolfilechunk.py | 1 +
 src/mistralai/client/models/toolmessage.py | 1 +
 .../client/models/toolreferencechunk.py | 1 +
 src/mistralai/client/models/tooltypes.py | 1 +
 src/mistralai/client/models/trainingfile.py | 1 +
 .../client/models/transcriptionresponse.py | 1 +
 .../models/transcriptionsegmentchunk.py | 1 +
 .../client/models/transcriptionstreamdone.py | 1 +
 .../models/transcriptionstreamevents.py | 1 +
 .../models/transcriptionstreameventtypes.py | 1 +
 .../models/transcriptionstreamlanguage.py | 1 +
 .../models/transcriptionstreamsegmentdelta.py | 1 +
 .../models/transcriptionstreamtextdelta.py | 1 +
 .../client/models/unarchiveftmodelout.py | 16 +-
 ...e_tuned_modelop.py => unarchivemodelop.py} | 5 +-
 ...v1_agents_updateop.py => updateagentop.py} | 5 +-
 ...e_versionop.py => updateagentversionop.py} | 5 +-
 ...nts_update_v1op.py => updatedocumentop.py} | 5 +-
 .../client/models/updateftmodelin.py | 1 +
 ...ries_update_v1op.py => updatelibraryop.py} | 5 +-
 ...fine_tuned_modelop.py => updatemodelop.py} | 11 +-
 ...op.py => updateorcreatelibraryaccessop.py} | 5 +-
 ...nts_upload_v1op.py => uploaddocumentop.py} | 5 +-
 ...outes_upload_fileop.py => uploadfileop.py} | 1 +
 src/mistralai/client/models/uploadfileout.py | 1 +
 src/mistralai/client/models/usageinfo.py | 1 +
 src/mistralai/client/models/usermessage.py | 1 +
 .../client/models/validationerror.py | 1 +
 .../client/models/wandbintegration.py | 1 +
 .../client/models/wandbintegrationout.py | 1 +
 .../client/models/websearchpremiumtool.py | 1 +
 src/mistralai/client/models/websearchtool.py | 1 +
 src/mistralai/client/models_.py | 57 +-
 src/mistralai/client/ocr.py | 1 +
 src/mistralai/client/sdk.py | 6 +-
 src/mistralai/client/sdkconfiguration.py | 1 +
 src/mistralai/client/transcriptions.py | 1 +
 src/mistralai/client/types/__init__.py | 1 +
 src/mistralai/client/types/basemodel.py | 1 +
 src/mistralai/client/utils/__init__.py | 1 +
 src/mistralai/client/utils/annotations.py | 1 +
 src/mistralai/client/utils/datetimes.py | 1 +
 src/mistralai/client/utils/enums.py | 1 +
 src/mistralai/client/utils/eventstreaming.py | 1 +
 src/mistralai/client/utils/forms.py | 1 +
 src/mistralai/client/utils/headers.py | 1 +
 src/mistralai/client/utils/logger.py | 1 +
 src/mistralai/client/utils/metadata.py | 1 +
 src/mistralai/client/utils/queryparams.py | 1 +
 src/mistralai/client/utils/requestbodies.py | 1 +
 src/mistralai/client/utils/retries.py | 1 +
 src/mistralai/client/utils/security.py | 1 +
 src/mistralai/client/utils/serializers.py | 1 +
 .../client/utils/unmarshal_json_response.py | 1 +
 src/mistralai/client/utils/url.py | 1 +
 src/mistralai/client/utils/values.py | 1 +
 uv.lock | 2 +-
 437 files changed, 3809 insertions(+), 3413 deletions(-)
 delete mode 100644 .speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock
 create mode 100644 Makefile
 delete mode 100644 docs/models/agentsapiv1agentsgetrequest.md
 rename docs/models/{agentsapiv1conversationsappendrequest.md => appendconversationrequest.md} (96%)
 rename docs/models/{agentsapiv1conversationsappendstreamrequest.md => appendconversationstreamrequest.md} (96%)
 delete mode 100644 docs/models/archiveftmodeloutobject.md
 rename docs/models/{jobsapiroutesfinetuningarchivefinetunedmodelrequest.md => archivemodelrequest.md} (93%)
 delete mode 100644 docs/models/batchjoboutobject.md
 delete mode 100644 docs/models/batchjobsoutobject.md
 rename docs/models/{jobsapiroutesbatchcancelbatchjobrequest.md => cancelbatchjobrequest.md} (86%)
 rename docs/models/{jobsapiroutesfinetuningcancelfinetuningjobrequest.md => cancelfinetuningjobrequest.md} (88%)
 rename docs/models/{jobsapiroutesfinetuninggetfinetuningjobresponse.md => cancelfinetuningjobresponse.md} (83%)
 delete mode 100644 docs/models/classifierdetailedjoboutobject.md
 delete mode 100644 docs/models/classifierftmodeloutobject.md
 delete mode 100644 docs/models/classifierjoboutobject.md
 delete mode 100644 docs/models/completiondetailedjoboutobject.md
 delete mode 100644 docs/models/completionftmodeloutobject.md
 delete mode 100644 docs/models/completionjoboutobject.md
 rename docs/models/{jobsapiroutesfinetuningcreatefinetuningjobresponse.md => createfinetuningjobresponse.md} (80%)
 rename docs/models/{agentsapiv1agentscreateorupdatealiasrequest.md => createorupdateagentaliasrequest.md} (90%)
 create mode 100644 docs/models/deleteagentaliasrequest.md
 rename docs/models/{agentsapiv1agentsdeleterequest.md => deleteagentrequest.md} (89%)
 rename docs/models/{agentsapiv1conversationsgetrequest.md => deleteconversationrequest.md} (95%)
 rename docs/models/{librariesdocumentsgetv1request.md => deletedocumentrequest.md} (91%)
 rename docs/models/{filesapiroutesdeletefilerequest.md => deletefilerequest.md} (88%)
 rename docs/models/{librariessharedeletev1request.md => deletelibraryaccessrequest.md} (96%)
 rename docs/models/{librariesgetv1request.md => deletelibraryrequest.md} (91%)
 rename docs/models/{deletemodelv1modelsmodeliddeleterequest.md => deletemodelrequest.md} (94%)
 rename docs/models/{filesapiroutesdownloadfilerequest.md => downloadfilerequest.md} (88%)
 rename docs/models/{agentsapiv1agentsgetagentversion.md => getagentagentversion.md} (79%)
 create mode 100644 docs/models/getagentrequest.md
 rename docs/models/{agentsapiv1agentsgetversionrequest.md => getagentversionrequest.md} (90%)
 rename docs/models/{jobsapiroutesbatchgetbatchjobrequest.md => getbatchjobrequest.md} (92%)
 rename docs/models/{agentsapiv1conversationshistoryrequest.md => getconversationhistoryrequest.md} (94%)
 rename docs/models/{agentsapiv1conversationsmessagesrequest.md => getconversationmessagesrequest.md} (94%)
 rename docs/models/{agentsapiv1conversationsdeleterequest.md => getconversationrequest.md} (95%)
 rename docs/models/{librariesdocumentsgetsignedurlv1request.md => getdocumentextractedtextsignedurlrequest.md} (89%)
 rename docs/models/{librariesdocumentsdeletev1request.md => getdocumentrequest.md} (90%)
 rename docs/models/{librariesdocumentsreprocessv1request.md => getdocumentsignedurlrequest.md} (90%)
 rename docs/models/{librariesdocumentsgetstatusv1request.md => getdocumentstatusrequest.md} (90%)
 create mode 100644 docs/models/getdocumenttextcontentrequest.md
 rename docs/models/{filesapiroutesgetsignedurlrequest.md => getfilesignedurlrequest.md} (96%)
 rename docs/models/{jobsapiroutesfinetuninggetfinetuningjobrequest.md => getfinetuningjobrequest.md} (89%)
 rename docs/models/{jobsapiroutesfinetuningstartfinetuningjobresponse.md => getfinetuningjobresponse.md} (82%)
 rename docs/models/{librariesdeletev1request.md => getlibraryrequest.md} (90%)
 delete mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md
 delete mode 100644 docs/models/jobsoutobject.md
 delete mode 100644 docs/models/legacyjobmetadataoutobject.md
 delete mode 100644 docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md
 delete mode 100644 docs/models/librariesdocumentsgettextcontentv1request.md
 rename docs/models/{agentsapiv1agentslistversionaliasesrequest.md => listagentaliasesrequest.md} (85%)
 rename docs/models/{agentsapiv1agentslistrequest.md => listagentsrequest.md} (84%)
 rename docs/models/{agentsapiv1agentslistversionsrequest.md => listagentversionsrequest.md} (94%)
 rename docs/models/{jobsapiroutesbatchgetbatchjobsrequest.md => listbatchjobsrequest.md} (90%)
 rename docs/models/{agentsapiv1conversationslistrequest.md => listconversationsrequest.md} (92%)
 rename docs/models/{agentsapiv1conversationslistresponse.md => listconversationsresponse.md} (84%)
 rename docs/models/{librariesdocumentslistv1request.md => listdocumentsrequest.md} (96%)
 rename docs/models/{filesapirouteslistfilesrequest.md => listfilesrequest.md} (98%)
 create mode 100644 docs/models/listfinetuningjobsrequest.md
 rename docs/models/{jobsapiroutesfinetuninggetfinetuningjobsstatus.md => listfinetuningjobsstatus.md} (94%)
 rename docs/models/{librariessharelistv1request.md => listlibraryaccessesrequest.md} (90%)
 create mode 100644 docs/models/orderby.md
 create mode 100644 docs/models/reprocessdocumentrequest.md
 rename docs/models/{agentsapiv1conversationsrestartrequest.md => restartconversationrequest.md} (96%)
 rename docs/models/{agentsapiv1conversationsrestartstreamrequest.md => restartconversationstreamrequest.md} (96%)
 rename docs/models/{filesapiroutesretrievefilerequest.md => retrievefilerequest.md} (88%)
 rename docs/models/{retrievemodelv1modelsmodelidgetrequest.md => retrievemodelrequest.md} (94%)
 rename docs/models/{jobsapiroutesfinetuningstartfinetuningjobrequest.md => startfinetuningjobrequest.md} (84%)
 rename docs/models/{jobsapiroutesfinetuningcancelfinetuningjobresponse.md => startfinetuningjobresponse.md} (82%)
 delete mode 100644 docs/models/unarchiveftmodeloutobject.md
 rename docs/models/{jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md => unarchivemodelrequest.md} (92%)
 rename docs/models/{agentsapiv1agentsupdaterequest.md => updateagentrequest.md} (96%)
 rename docs/models/{agentsapiv1agentsupdateversionrequest.md => updateagentversionrequest.md} (89%)
 rename docs/models/{librariesdocumentsupdatev1request.md => updatedocumentrequest.md} (97%)
 rename docs/models/{librariesupdatev1request.md => updatelibraryrequest.md} (97%)
 rename docs/models/{jobsapiroutesfinetuningupdatefinetunedmodelrequest.md => updatemodelrequest.md} (95%)
 rename docs/models/{jobsapiroutesfinetuningupdatefinetunedmodelresponse.md => updatemodelresponse.md} (81%)
 rename docs/models/{librariessharecreatev1request.md => updateorcreatelibraryaccessrequest.md} (95%)
 rename docs/models/{librariesdocumentsuploadv1request.md => uploaddocumentrequest.md} (96%)
 rename examples/mistral/jobs/{async_jobs.py => async_fine_tuning.py} (97%)
 rename examples/mistral/jobs/{async_jobs_chat.py => async_fine_tuning_chat.py} (99%)
 rename examples/mistral/jobs/{jobs.py => fine_tuning.py} (97%)
 rename examples/mistral/jobs/{dry_run_job.py => fine_tuning_dry_run.py} (97%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_appendop.py => appendconversationop.py} (87%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_append_streamop.py => appendconversationstreamop.py} (87%)
 rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py => archivemodelop.py} (76%)
 rename src/mistralai/client/models/{jobs_api_routes_batch_cancel_batch_jobop.py => cancelbatchjobop.py} (76%)
 rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py => cancelfinetuningjobop.py} (73%)
 rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py => createfinetuningjobop.py} (71%)
 rename src/mistralai/client/models/{agents_api_v1_agents_create_or_update_aliasop.py => createorupdateagentaliasop.py} (83%)
 create mode 100644 src/mistralai/client/models/deleteagentaliasop.py
 rename src/mistralai/client/models/{agents_api_v1_agents_deleteop.py => deleteagentop.py} (78%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_deleteop.py => deleteconversationop.py} (81%)
 rename src/mistralai/client/models/{libraries_documents_reprocess_v1op.py => deletedocumentop.py} (82%)
 rename src/mistralai/client/models/{files_api_routes_delete_fileop.py => deletefileop.py} (78%)
 rename src/mistralai/client/models/{libraries_share_delete_v1op.py => deletelibraryaccessop.py} (83%)
 rename src/mistralai/client/models/{libraries_get_v1op.py => deletelibraryop.py} (77%)
 rename src/mistralai/client/models/{delete_model_v1_models_model_id_deleteop.py => deletemodelop.py} (79%)
 rename src/mistralai/client/models/{files_api_routes_download_fileop.py => downloadfileop.py} (77%)
 rename src/mistralai/client/models/{agents_api_v1_agents_getop.py => getagentop.py} (77%)
 rename src/mistralai/client/models/{agents_api_v1_agents_get_versionop.py => getagentversionop.py} (81%)
 rename src/mistralai/client/models/{jobs_api_routes_batch_get_batch_jobop.py => getbatchjobop.py} (93%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_historyop.py => getconversationhistoryop.py} (80%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_messagesop.py => getconversationmessagesop.py} (80%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_getop.py => getconversationop.py} (90%)
 rename src/mistralai/client/models/{libraries_documents_get_extracted_text_signed_url_v1op.py => getdocumentextractedtextsignedurlop.py} (77%)
 rename src/mistralai/client/models/{libraries_documents_delete_v1op.py => getdocumentop.py} (82%)
 rename src/mistralai/client/models/{libraries_documents_get_status_v1op.py => getdocumentsignedurlop.py} (80%)
 rename src/mistralai/client/models/{libraries_documents_get_signed_url_v1op.py => getdocumentstatusop.py} (81%)
 rename src/mistralai/client/models/{libraries_documents_get_text_content_v1op.py => getdocumenttextcontentop.py} (80%)
 rename src/mistralai/client/models/{files_api_routes_get_signed_urlop.py => getfilesignedurlop.py} (86%)
 rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py => getfinetuningjobop.py} (74%)
 rename src/mistralai/client/models/{libraries_delete_v1op.py => getlibraryop.py} (78%)
 rename src/mistralai/client/models/{agents_api_v1_agents_list_version_aliasesop.py => listagentaliasesop.py} (75%)
 rename src/mistralai/client/models/{agents_api_v1_agents_listop.py => listagentsop.py} (82%)
 rename src/mistralai/client/models/{agents_api_v1_agents_list_versionsop.py => listagentversionsop.py} (88%)
 rename src/mistralai/client/models/{jobs_api_routes_batch_get_batch_jobsop.py => listbatchjobsop.py} (87%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_listop.py => listconversationsop.py} (85%)
 rename src/mistralai/client/models/{libraries_documents_list_v1op.py => listdocumentsop.py} (95%)
 rename src/mistralai/client/models/{files_api_routes_list_filesop.py => listfilesop.py} (96%)
 rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py => listfinetuningjobsop.py} (93%)
 rename src/mistralai/client/models/{libraries_share_list_v1op.py => listlibraryaccessesop.py} (76%)
 rename src/mistralai/client/models/{libraries_documents_get_v1op.py => reprocessdocumentop.py} (81%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_restartop.py => restartconversationop.py} (87%)
 rename src/mistralai/client/models/{agents_api_v1_conversations_restart_streamop.py => restartconversationstreamop.py} (87%)
 rename src/mistralai/client/models/{files_api_routes_retrieve_fileop.py => retrievefileop.py} (77%)
 rename src/mistralai/client/models/{retrieve_model_v1_models_model_id_getop.py => retrievemodelop.py} (89%)
 rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py => startfinetuningjobop.py} (72%)
 rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py => unarchivemodelop.py} (76%)
 rename src/mistralai/client/models/{agents_api_v1_agents_updateop.py => updateagentop.py} (86%)
 rename src/mistralai/client/models/{agents_api_v1_agents_update_versionop.py => updateagentversionop.py} (81%)
 rename src/mistralai/client/models/{libraries_documents_update_v1op.py => updatedocumentop.py} (87%)
 rename src/mistralai/client/models/{libraries_update_v1op.py => updatelibraryop.py} (85%)
 rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py => updatemodelop.py} (77%)
 rename src/mistralai/client/models/{libraries_share_create_v1op.py => updateorcreatelibraryaccessop.py} (81%)
 rename src/mistralai/client/models/{libraries_documents_upload_v1op.py => uploaddocumentop.py} (92%)
 rename src/mistralai/client/models/{files_api_routes_upload_fileop.py => uploadfileop.py} (97%)

diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock
index 69828bd7..6e86c59c 100644
--- a/.speakeasy/gen.lock
+++ b/.speakeasy/gen.lock
@@ -1,19 +1,19 @@
 lockVersion: 2.0.0
 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161
 management:
-  docChecksum: e4b3b07fe28f4666261325e923d6c5d9
+  docChecksum: 2d9e4f612e5caf84349ab02663eee66e
   docVersion: 1.0.0
   speakeasyVersion: 1.685.0
   generationVersion: 2.794.1
-  releaseVersion: 2.0.0a1
-  configChecksum: d5e0f55b62bca3e8aab33c7955415e61
+  releaseVersion: 2.0.0a3
+  configChecksum: 7fc1ba01c21def8447b979e71593af4a
   repoURL: https://github.com/mistralai/client-python.git
   installationURL: https://github.com/mistralai/client-python.git
   published: true
 persistentEdits:
-  generation_id: b2306c28-6200-44c1-a856-ddd318359c15
-  pristine_commit_hash: dc36861e5d8b9f4c91221be8f09dc13254755c9a
-  pristine_tree_hash: 640358903b623a1b0d7deabbb43f39e82676a1a1
+  generation_id: 3aa9018f-cb6c-4c1b-96d0-b832fd5f6513
+  pristine_commit_hash: 5c4e3b65b7572c91338d50dc3ca91ea6a46eedf7
+  pristine_tree_hash: aaea604044e12872107c3b550ea7be094fb66a99
 features:
   python:
     additionalDependencies: 1.0.0
@@ -62,8 +62,8 @@ trackedFiles:
     pristine_git_object: 1810386448a440cfc5f7b8579695b228ae40460d
   docs/models/agent.md:
     id: ffdbb4c53c87
-    last_write_checksum: sha1:26d2fb743d3fdd54a6ab1258a37f08d1726927ac
-    pristine_git_object: ee054dd349848eff144d7064c319c3c8434bdc6c
+    last_write_checksum: sha1:4538aaa78a09b7e33db405f84916b1eb82f94bca
+    pristine_git_object: e335d889cdb70f4d3c987827ff714db90418cb39
   docs/models/agentaliasresponse.md:
     id: 5ac4721d8947
     last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167
@@ -82,14 +82,16 @@ trackedFiles:
     pristine_git_object: ea7cc75c5197ed42f9fb508a969baa16effe1f98
   docs/models/agentcreationrequest.md:
     id: 697a770fe5c0
-    last_write_checksum: sha1:d77c75f922c64df266b101a2fd23c7fe56b7894b
-    pristine_git_object: afc27d3b688f9ca187606243c810fd19d12bb840
+    last_write_checksum: sha1:b3f12ca0a6356e657de2941c8441fc951bcc96f4
+    pristine_git_object: f0f0fdbc13f8f490ded4f8df3944250aece1311b
   docs/models/agentcreationrequesttool.md:
+    id: 392d970ffb74
     last_write_checksum: sha1:310d4b107554a9c16143191fdc306a5438b63768
+    pristine_git_object: b3bd7fa3cead0a0a1480b0e1b3f0afbfd177b600
   docs/models/agenthandoffdoneevent.md:
     id: dcf166a3c3b0
     last_write_checksum: sha1:9e95c09f724827f5e9c202fd634bdfa2baef1b6e
-    pristine_git_object: c0039f41825e3667cd8e91adae5bb78a2e3ac8ae
+    pristine_git_object: 6bfcc3d83457edf05d0f13957d34ead0f260599b
   docs/models/agenthandoffentry.md:
     id: 39d54f489b84
     last_write_checksum: sha1:7d949e750fd24dea20cabae340f9204d8f756008
@@ -105,93 +107,19 @@ trackedFiles:
   docs/models/agenthandoffstartedevent.md:
     id: b620102af460
     last_write_checksum: sha1:33732e0465423348c2ace458506a597a3dadf9b2
-    pristine_git_object: 035cd02aaf338785d9f6410fde248591c5ffa5f7
+    pristine_git_object: 518b5a0c4521ec55a5a28ba3ef0ad1c1fce52792
   docs/models/agentobject.md:
     id: ed24a6d647a0
     last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d
     pristine_git_object: 70e143b030d3041c7538ecdacb8f5f9f8d1b5c92
-  docs/models/agentsapiv1agentscreateorupdatealiasrequest.md:
-    id: c09ec9946094
-    last_write_checksum: sha1:0883217b4bad21f5d4f8162ca72005bf9105a93f
-    pristine_git_object: 79406434cc6ff3d1485089f35639d6284f66d6cb
-  docs/models/agentsapiv1agentsdeleterequest.md:
-    id: 0faaaa59add9
-    last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2
-    pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c
-  docs/models/agentsapiv1agentsgetagentversion.md:
-    last_write_checksum: sha1:e4f4c6a64b1c2ec9465b7ad008df4d7859098e59
-  docs/models/agentsapiv1agentsgetrequest.md:
-    id: 01740ae62cff
-    last_write_checksum: sha1:bc86e90289ec09b40212083a82455b4fe71c7194
-    pristine_git_object: c71d4419afd3b51713e154b8021d4fe2b49d8af5
-  docs/models/agentsapiv1agentsgetversionrequest.md:
-    id: 88ed22b85cde
-    last_write_checksum: sha1:0ef23807c8efa2662144da66745045abdd2cb60a
-    pristine_git_object: 96a7358943a69e871a2bb7f0f30d6fe2bb8dff3d
-  docs/models/agentsapiv1agentslistrequest.md:
-    id: c2720c209527
-    last_write_checksum: sha1:cb599d1583ee9374d44695f5ee7efe79dbb8a503
-    pristine_git_object: 8cba13253d42a180b06eab8c10297ef362fb434d
-  docs/models/agentsapiv1agentslistversionaliasesrequest.md:
-    id: 69c8bce2c017
-    last_write_checksum: sha1:4083fc80627b2cc04fd271df21393944730ef1ba
-    pristine_git_object: 3083bf92641404738948cd57306eac978b701551
-  docs/models/agentsapiv1agentslistversionsrequest.md:
-    id: 0bc44ed8d6bb
-    last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18
-    pristine_git_object: 91831700bed92cb4f609f8c412dcb0ee98b544ca
-  docs/models/agentsapiv1agentsupdaterequest.md:
-    id: 7692812cd677
-    last_write_checksum: sha1:8b17ce9d488b5eab892b66ca44d0e0a01b56aa11
-    pristine_git_object: f60f8e5ba0cc6923935187ba221875d757c4693e
-  docs/models/agentsapiv1agentsupdateversionrequest.md:
-    id: a001251b1624
-    last_write_checksum: sha1:0ee9e0fc55fd969f2b8f2c55dec93bf10e0e5b2f
-    pristine_git_object: e937acc9b1d3f50eee69495b1305f7aee1c960ac
-  docs/models/agentsapiv1conversationsappendrequest.md:
-    id: 70f76380e810
-    last_write_checksum: sha1:d428dc114b60362d269b5ae50a57ea60b9edee1a
-    pristine_git_object: ac8a00ecab30305de8eb8c7c08cda1b1c04148c3
-  docs/models/agentsapiv1conversationsappendstreamrequest.md:
-    id: f6ada9a592c5
-    last_write_checksum: sha1:8a806ca2e5bad75d9d0cf50726dc0d5b8e7e3eab
-    pristine_git_object: dbc330f11aa3039c9cea2dd7d477d56d5c4969d0
-  docs/models/agentsapiv1conversationsdeleterequest.md:
-    id: c2c9f084ed93
-    last_write_checksum: sha1:9ecca93f8123cebdd1f9e74cf0f4a104b46402a8
-    pristine_git_object: c6eed281331cb4d2cac4470de5e04935d22eca5a
-  docs/models/agentsapiv1conversationsgetrequest.md:
-    id: d6acce23f92c
-    last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf
-    pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e
-  docs/models/agentsapiv1conversationshistoryrequest.md:
-    id: e3efc36ea8b5
-    last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be
-    pristine_git_object: 7e5d39e9a11ac437a24b8c059db56527fa93f8b0
-  docs/models/agentsapiv1conversationslistrequest.md:
-    id: 406c3e92777a
-    last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194
-    pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4
-  docs/models/agentsapiv1conversationslistresponse.md:
-    last_write_checksum: sha1:1144f41f8a97daacfb75c11fdf3575e553cf0859
-  docs/models/agentsapiv1conversationsmessagesrequest.md:
-    id: 2c749c6620d4
-    last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2
-    pristine_git_object: a91ab0466d57379eacea9d475c72db9cb228a649
-  docs/models/agentsapiv1conversationsrestartrequest.md:
-    id: 6955883f9a44
-    last_write_checksum: sha1:99c1455c7fde9b82b6940e6e1ed4f363d7c38de9
-    pristine_git_object: a18a41f5395adae3942573792c86ddf7c3812ff4
-  docs/models/agentsapiv1conversationsrestartstreamrequest.md:
-    id: 0c39856fd70e
-    last_write_checksum: sha1:d03475c088c059077049270c69be01c67a17f178
-    pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d
   docs/models/agentscompletionrequest.md:
     id: 906b82c214dc
     last_write_checksum: sha1:84ee0378e413830260a279a67fc3b1342e643328
-    pristine_git_object: 2a0c4144fb5919e5ce892db1210bde90820c127c
+    pristine_git_object: d87dc7da67dd883f92a23d8df4f5648e97c4f12e
   docs/models/agentscompletionrequestmessage.md:
+    id: 5337f0644b40
     last_write_checksum: sha1:ecf7b7cdf0d24a5e97b520366cf816b8731734bb
+    pristine_git_object: 957703b528d3da6f57576064d7cb9b2af63c362a
   docs/models/agentscompletionrequeststop.md:
     id: ad1e0e74b6b8
     last_write_checksum: sha1:b2422d4dada80d54b2dd499a6659a3894318d2c9
@@ -203,9 +131,11 @@ trackedFiles:
   docs/models/agentscompletionstreamrequest.md:
     id: 21d09756447b
     last_write_checksum: sha1:0c88bc63255733480b65b61685dcc356fcc9ed66
-    pristine_git_object: b2ccd4e8fe2fc3f63d4b517f7ecfc21f3aef9d67
+    pristine_git_object: dd1804a1b3a2aadc3e3c3964262b0fc25195703f
   docs/models/agentscompletionstreamrequestmessage.md:
+    id: b309ade92081
     last_write_checksum: sha1:98744c9646969250242cbbfbdf428dbd7030e4bb
+    pristine_git_object: 6ccf4244a709de7bedbf75042efb935129a6ca01
   docs/models/agentscompletionstreamrequeststop.md:
     id: 4925b6b8fbca
     last_write_checksum: sha1:c9d0d73ca46643ffdf02e6c6cd35de5c39460c20
@@ -215,25 +145,37 @@ trackedFiles:
     last_write_checksum: sha1:843c4946d5cab61df2cba458af40835c4e8bcafe
     pristine_git_object: 4354523a7d0d21721a96e91938b89236169ccced
   docs/models/agenttool.md:
+    id: 513b8b7bc0b7
     last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7
+    pristine_git_object: 022f7e10edb22cb1b1d741c13ac586bd136d03b5
   docs/models/agentupdaterequest.md:
     id: 75a7f820b906
-    last_write_checksum: sha1:306134659876c4e87324dfec879ab0b691a74f3a
-    pristine_git_object: 641d1e406f0fba0fce9f10c16a15f883c7095c07
+    last_write_checksum: sha1:358e39130bc439f5801a2dcc73502a1f1c2c6685
+    pristine_git_object: b1830d7be6cb8e33529246a3368deaf0909a3343
   docs/models/agentupdaterequesttool.md:
+    id: 9c9aac9dda3d
     last_write_checksum: sha1:25d8a331a706bf8e6056b99f8ff1a46abff6ae72
+    pristine_git_object: ce5531260e9b06db0b93d4bfcf95a12b627da522
   docs/models/apiendpoint.md:
     id: be613fd9b947
     last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4
     pristine_git_object: 8d83a26f19241da5ce626ff9526575c50e5d27be
+  docs/models/appendconversationrequest.md:
+    id: 295b6d446690
+    last_write_checksum: sha1:0c3d7091b19abf30fb0b78800cab292abd902c1d
+    pristine_git_object: 977d8e8b797c8ae36de4da90bc32bba47a6a0779
+  docs/models/appendconversationstreamrequest.md:
+    id: aeea33736f95
+    last_write_checksum: sha1:a0b5b036e46688e862c7f7671c86f965b5322742
+    pristine_git_object: a23231c2c2f0017ba29c8863c3046aebe8f57ff1
   docs/models/archiveftmodelout.md:
     id: 9e855deac0d1
-    last_write_checksum: sha1:ab79a7762ca33eb1f16b3ed2e5aa5318ec398829
-    pristine_git_object: 46a9e755555480d333f91adfe840cdf09313e6c2
-  docs/models/archiveftmodeloutobject.md:
-    id: 9afeccafe5b6
-    last_write_checksum: sha1:4bf1b38dc9b6f275affaf353b4bf28bc63ef817c
-    pristine_git_object: f6f46889da24995f8e5130def3140a9fd1aff57c
+    last_write_checksum: sha1:41866e666241ed42e5e7c6df5a64b887f1ff774b
+    pristine_git_object: 98fa7b19e4579198b433eccc76b2b4d990476b72
+  docs/models/archivemodelrequest.md:
+    id: 3fde72a45ad9
+    last_write_checksum: sha1:60eaa9be631215c63a2c01da7da809ec34f5b01a
+    pristine_git_object: 806d135e2bc6c0da2b20a4bb84107d3ab31962ad
   docs/models/arguments.md:
     id: 7ea5e33709a7
     last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec
@@ -257,7 +199,7 @@ trackedFiles:
   docs/models/audiochunk.md:
     id: 88315a758fd4
     last_write_checksum: sha1:d52e493765280fc0b1df61a0ce1086205965c712
-    pristine_git_object: c443e7ade726ba88dd7ce9a8341687ef38abe598
+    pristine_git_object: 8a04af045f4ce33a2964f5f75664e82c3edf1bf3
   docs/models/audioencoding.md:
     id: 1e0dfee9c2a0
     last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478
@@ -277,7 +219,7 @@ trackedFiles:
   docs/models/basemodelcard.md:
     id: 2f62bfbd650e
     last_write_checksum: sha1:4b29e0d24060b6724e82aeee05befe1cddb316f4
-    pristine_git_object: 58ad5e25131804287b5f7c834afc3ad480d065a9
+    pristine_git_object: 0f42504fd6446c0baf4686bfbb8481658b6789cd
   docs/models/batcherror.md:
     id: 8053e29a3f26
     last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f
@@ -288,20 +230,12 @@ trackedFiles:
     pristine_git_object: 7dcf265dfe63cbbd13b7fa0e56fc62717f3ee050
   docs/models/batchjobout.md:
     id: 49a98e5b2aba
-    last_write_checksum: sha1:82e0c730eeac4fc9ee787b213e4653cee1cca5aa
-    pristine_git_object: cb49649b87aeb3ec10068d96222e3d803c508324
-  docs/models/batchjoboutobject.md:
-    id: 8964218f4f7e
-    last_write_checksum: sha1:8fffd069c91ea950d321cd41994df78df3eb2051
-    pristine_git_object: 64ae89654c3d1a2743e67068f66fbd56f70c14b5
+    last_write_checksum: sha1:b504fcf5a65567ec114fdc5b79cabe7554b36cac
+    pristine_git_object: 5f1011734b249a75cf9381d024f295fe31ff9f68
   docs/models/batchjobsout.md:
     id: d8041dee5b90
-    last_write_checksum: sha1:619fcebe753b14a34b7d3ba56f7b45c6c2690fad
-    pristine_git_object: a76cfdccf96ac2adf783417444be70c5b208582b
-  docs/models/batchjobsoutobject.md:
-    id: 885adfc869d5
-    last_write_checksum: sha1:3fdc878e360b22d1074bd61f95d7461d478d78a2
-    pristine_git_object: d4bf9f65ae546b160dd8ec5f3ecdc4228dc91bfa
+    last_write_checksum: sha1:5e4127548b50abbb6cee267ac53a8e05f55b97f9
+    pristine_git_object: 7a9d6f688e87851ed7ffa516523e12cb3f967c68
   docs/models/batchjobstatus.md:
     id: 7e6f034d3c91
     last_write_checksum: sha1:9e876b4b94255e1399bbb31feb51e08691bcb8fc
@@ -314,6 +248,18 @@ trackedFiles:
     id: 9d14e972f08a
     last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c
     pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9
+  docs/models/cancelbatchjobrequest.md:
+    id: db6860fe9ec3
+    last_write_checksum: sha1:d2f55d5ffec21f6f70cc77c643c73113b0d1ed43
+    pristine_git_object: f31f843bb864fc21ed620e4e069b8a97a091d99c
+  docs/models/cancelfinetuningjobrequest.md:
+    id: 10d341c56c9c
+    last_write_checksum: sha1:a484ad9d8eb791d60e5447b845b73871e9f1e6a3
+    pristine_git_object: 6525788cd527eca4d89f95d4c829c1b3eda0f06e
+  docs/models/cancelfinetuningjobresponse.md:
+    id: 0c9ca281a898
+    last_write_checksum: sha1:ac02c2a268a21430e74f8075671de0b97fd844e6
+    pristine_git_object: c512342e575e9b6d57da08b20f50c86510d246d8
   docs/models/chatclassificationrequest.md:
     id: 57b86771c870
     last_write_checksum: sha1:2ee5fff26c780ade7ed89617358befa93a6dfd23
@@ -321,17 +267,23 @@ trackedFiles:
   docs/models/chatcompletionchoice.md:
     id: 0d15c59ab501
     last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564
-    pristine_git_object: d77d286eb0b2d2b018b6ff5f9617225be4fa9fa5
+    pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1
   docs/models/chatcompletionchoicefinishreason.md:
+    id: 225764da91d3
     last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284
+    pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b
   docs/models/chatcompletionrequest.md:
     id: adffe90369d0
     last_write_checksum: sha1:f6eec11c908ee6581e508fff98e785441c4b84ad
-    pristine_git_object: 109fa7b13d19ccc85e4633e64b44613640c171fb
+    pristine_git_object: f3abeeff4346c181cfca40eb819a8c6ecf656026
   docs/models/chatcompletionrequestmessage.md:
+    id: 3f5e170d418c
     last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607
+    pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9
   docs/models/chatcompletionrequeststop.md:
+    id: fcaf5bbea451
     last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851
+    pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485
   docs/models/chatcompletionrequesttoolchoice.md:
     id: b97041b2f15b
     last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861
@@ -343,9 +295,11 @@ trackedFiles:
   docs/models/chatcompletionstreamrequest.md:
     id: cf8f29558a68
     last_write_checksum: sha1:7ed921e0366c1b00225c05e60937fb8d228f027b
-    pristine_git_object: 7d5fb411bde92e39910018cc2ad8d4d67ea980a1
+    pristine_git_object: 42792d396462dead9d7a80a87f05a0888efe348b
   docs/models/chatcompletionstreamrequestmessage.md:
+    id: 053a98476cd2
     last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5
+    pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe
   docs/models/chatcompletionstreamrequeststop.md:
     id: d0e89a4dca78
     last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41
@@ -357,13 +311,19 @@ trackedFiles:
   docs/models/chatmoderationrequest.md:
     id: 22862d4d20ec
     last_write_checksum: sha1:9bbe510ee67515092bd953ad7f84ae118398af54
-    pristine_git_object: 69b6c1dc2c10abbbc2574f3782b2d85687661f11
+    pristine_git_object: f252482db0e404e21a61aafba0d09d9561610c11
   docs/models/chatmoderationrequestinputs1.md:
+    id: 89311e3e440d
     last_write_checksum: sha1:8d4c2dbd9207589aabf9c00cf60c61d2d3eef452
+    pristine_git_object: e15b8a844110fae68c02da040cd0122be5afc09a
   docs/models/chatmoderationrequestinputs2.md:
+    id: 4daa876da841
     last_write_checksum: sha1:e34eb6557e06e7783ed14d959c2a29959c26fd4c
+    pristine_git_object: f40a4ebe0780c493e8bd7a322aec31893669a181
   docs/models/chatmoderationrequestinputs3.md:
+    id: aec173bca43b
     last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7
+    pristine_git_object: ff1c6ea32233d5c5e8d6292c62f9e8eacd3340c3
   docs/models/checkpointout.md:
     id: 909ce66e1f65
     last_write_checksum: sha1:89e678d55b97353ad1c3b28d9f1ab101f6be0928
@@ -386,36 +346,28 @@ trackedFiles:
     pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1
   docs/models/classifierdetailedjobout.md:
     id: a2084ba5cc8c
-    last_write_checksum: sha1:63acd8a1921ac99143685722f8812b1f572d451f
-    pristine_git_object: ccc88f89ed81e6e879a88b9729c4945704370fd9
+    last_write_checksum: sha1:ee206a5c68bd7aed201f8274d0710e8c570a35d2
+    pristine_git_object: fb532449458fb445bb79d3fa0ed8e6faa538f00a
   docs/models/classifierdetailedjoboutintegration.md:
+    id: 7a775cbd4d9f
     last_write_checksum: sha1:6b2691766c1795d17b1572076a693eb377c5307f
-  docs/models/classifierdetailedjoboutobject.md:
-    id: 1ca54621f5bf
-    last_write_checksum: sha1:5ae3d2847a66487d70bc2ff97a8c31bbbba191c7
-    pristine_git_object: 08cbcffc1c60c11c07d6e8c4724f46394f7d0854
+    pristine_git_object: 9dfa6e8a179529bd12fb8935c264e3c57c62cb41
   docs/models/classifierdetailedjoboutstatus.md:
     id: a98493f9d02d
     last_write_checksum: sha1:3441d9961e9093d314dd1bc88df1743cd12866d2
     pristine_git_object: c3118aafa8614f20c9adf331033e7822b6391752
   docs/models/classifierftmodelout.md:
     id: 268ac482c38b
-    last_write_checksum: sha1:dda3d6bf88fb6a3e860821aefb8a522d8a476b1d
-    pristine_git_object: dd9e8bf9c0ee291b44cd4f06146dea3d3280c143
-  docs/models/classifierftmodeloutobject.md:
-    id: 6aa25d9fe076
-    last_write_checksum: sha1:5a5fe345b3a2b3e65ce3171e8d6e9b9493ec7b06
-    pristine_git_object: 9fe05bcf42325a390e5c984c7bdf346668944928
+    last_write_checksum: sha1:46bdbe1176bbf43dd79a4ff8255129fd82bd97bc
+    pristine_git_object: 6e7afbbed075efe2e29f42b7bc3d758fe47460d4
   docs/models/classifierjobout.md:
     id: 2e3498af3f8c
-    last_write_checksum: sha1:311f6ca4b6b625768c4ddd63e642e14e6a58df23
-    pristine_git_object: aa1d3ca910535e283059903a2c39331673c1982b
+    last_write_checksum: sha1:70845cc24cd48987552ca337ea5522066e6de1b9
+    pristine_git_object: ceecef5decdbd74a9741401ad0f1a9e8e215ae82
   docs/models/classifierjoboutintegration.md:
+    id: 30a340fed57d
     last_write_checksum: sha1:72dfda442a88f977f3480c95127534a600362806
-  docs/models/classifierjoboutobject.md:
-    id: 04543f046d40
-    last_write_checksum: sha1:96863c621ddf0425b818edcd5da32ddbd5fd1194
-    pristine_git_object: 1b42d547de7bdfb109c3ff750c6754e15ec4a8c1
+    pristine_git_object: 33af8a708618c1e54c7f55e67c8848fe45217799
   docs/models/classifierjoboutstatus.md:
     id: 2411c6bf3297
     last_write_checksum: sha1:6ceef218b783505231a0ec653292460e6cb1a65b
@@ -439,7 +391,7 @@ trackedFiles:
   docs/models/codeinterpretertool.md:
     id: f009740c6e54
     last_write_checksum: sha1:bce278ce22703246613254ee2dac57f8b14e8060
-    pristine_git_object: d5ad789ed012accaa105ced4f8dfd8e9eb83d4a3
+    pristine_git_object: 544cda9358faf6ec525d06f78068817aee55b193
   docs/models/completionargs.md:
     id: 3b54534f9830
     last_write_checksum: sha1:c0368b7c21524228939b2093ff1a4524eb57aeb7
@@ -454,16 +406,16 @@ trackedFiles:
     pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35
   docs/models/completiondetailedjobout.md:
     id: 634ca7241abd
-    last_write_checksum: sha1:e5edf096998b6b8e2048f354bd694288dd609875
-    pristine_git_object: 84613080715078a73204d3984e7f97477ef548ae
+    last_write_checksum: sha1:7899568eedfa04cccb5b07c2e0d1e821af8fb0a2
+    pristine_git_object: bc7e5d1cb5c298d0d935a9e3472ad547b5b9714c
   docs/models/completiondetailedjoboutintegration.md:
+    id: f8d1f509f456
     last_write_checksum: sha1:3317db3f71962623a6144e3de0db20b4abfd5b9b
-  docs/models/completiondetailedjoboutobject.md:
-    id: 8e418065aa1c
-    last_write_checksum: sha1:d429d772a6a4249809bbf0c26a6547e5f2de3f11
-    pristine_git_object: 1bec88e5f4c5f082c53157b8ee95b4b05cb787e3
+    pristine_git_object: 9e526053160cc393dc65242cff8f8143bc67e38c
   docs/models/completiondetailedjoboutrepository.md:
+    id: a8e7452065a7
     last_write_checksum: sha1:b1910efc6cd1e50391cd33daef004441bac3d3cd
+    pristine_git_object: 92a7b75c51f27e73ca41d5ffee28921057959878
   docs/models/completiondetailedjoboutstatus.md:
     id: c606d38452e2
     last_write_checksum: sha1:1e9a5736de32a44cf539f7eaf8214aad72ec4994
@@ -474,26 +426,24 @@ trackedFiles:
     pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d
   docs/models/completionftmodelout.md:
     id: 93fed66a5794
-    last_write_checksum: sha1:17c4ed9718d6556ddb103cff5a5823c3baa18f41
-    pristine_git_object: cd0858258521ced3990ff393fd00c11ef0abe094
-  docs/models/completionftmodeloutobject.md:
-    id: c6e5667c5f03
-    last_write_checksum: sha1:b4cbdc01a2b439d923ad542cf852797c24d234e8
-    pristine_git_object: 6f9d858caa563f4a25ae752dd40ba632ecd0af75
+    last_write_checksum: sha1:ee4bccae36229f23b1db8894585cc8e88ad71f6d
+    pristine_git_object: ccd4844fab92d000de1cc9ba59c884e31dc5db26
   docs/models/completionjobout.md:
     id: 77315b024171
-    last_write_checksum: sha1:1070ddeaef67a65f27a365a57d343a83b4b40aca
-    pristine_git_object: cb471746c4f23d2ec8451f4c45bf57e2f001072f
+    last_write_checksum: sha1:a08ca1dcedbb9b88b9909a4b03251e2fb0cd8319
+    pristine_git_object: 5eb44eef73872b0f1c2709381fc0852e3b3e224b
   docs/models/completionjoboutintegration.md:
+    id: 25e651dd8d58
     last_write_checksum: sha1:59711a3fa46d6a4bff787a61c81ecc34bdaaec2e
-  docs/models/completionjoboutobject.md:
-    id: 922a1e3a4e33
-    last_write_checksum: sha1:020211def2c4cd969398cf009b187ca19bd7a943
-    pristine_git_object: 712b107d79a8c60c4330da4f3af307545bf1a7ec
+    pristine_git_object: 6474747bf8d38485f13b1702e3245ef9e0f866a9
   docs/models/completionjoboutrepository.md:
+    id: 2c94b3ecacf1
     last_write_checksum: sha1:2cb5b23640eeaf87f45dc9f180247ed7a6307df7
+    pristine_git_object: 52f65558f8b3663596642d8854df36d29858beae
   docs/models/completionjoboutstatus.md:
+    id: b77ebfd0e4f0
     last_write_checksum: sha1:b8f33134c63b12dc474e7714b1ac19d768a3cbbd
+    pristine_git_object: 917549450a096397d9a7ca0b8f5856f7cd62db04
   docs/models/completionresponsestreamchoice.md:
     id: d56824d615a6
     last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872
@@ -541,7 +491,7 @@ trackedFiles:
   docs/models/conversationhistory.md:
     id: 7e97e8e6d6e9
     last_write_checksum: sha1:06df76a87aca7c5acd5a28ca3306be09a8bb541b
-    pristine_git_object: ebb1d5136cebf2bc9b77047fe83feecc68532d03
+    pristine_git_object: c8baad0b597ddb9148413a651a659b06c20351ac
   docs/models/conversationhistoryobject.md:
     id: 088f7df6b658
     last_write_checksum: sha1:bcce4ef55e6e556f3c10f65e860faaedc8eb0671
@@ -561,17 +511,23 @@ trackedFiles:
   docs/models/conversationrequest.md:
     id: dd7f4d6807f2
     last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c
-    pristine_git_object: 2b4ff8ef3398561d9b3e192a51ec22f64880389c
+    pristine_git_object: bd7823a88a07d4bc8fe1da82e51f843e70480ee1
   docs/models/conversationrequestagentversion.md:
+    id: 68aad87b1459
     last_write_checksum: sha1:fd2e9cd7ed2499b5843c592505ec5e0596a50b33
+    pristine_git_object: 9f2518211256762d03dec12c4c4464d48f7ed52c
   docs/models/conversationrequesthandoffexecution.md:
+    id: 9733b1e121d1
     last_write_checksum: sha1:f7df210a46acf24abb1312123aebe9e595a190e8
+    pristine_git_object: e7314f7e0080ff3f1a80afdbb229c78df5b008bb
   docs/models/conversationrequesttool.md:
+    id: bd1bb6fcea8b
     last_write_checksum: sha1:69d503d73f5bd044882d13cd0c7de188dd5f4831
+    pristine_git_object: 2e4e8d01b5482c4e0644be52e55bf6912aeff69e
   docs/models/conversationresponse.md:
     id: 2eccf42d48af
     last_write_checksum: sha1:17ebabdf1dd191eeac442046511c44120dfa97a1
-    pristine_git_object: 38cdadd0055d457fa371984eabcba7782e130839
+    pristine_git_object: e31821288dd18bf425e442787f67a69ea35ff6a6
   docs/models/conversationresponseobject.md:
     id: 6c028b455297
     last_write_checksum: sha1:76270a07b86b1a973b28106f2a11673d082a385b
@@ -603,7 +559,7 @@ trackedFiles:
   docs/models/conversationstreamrequest.md:
     id: 833f266c4f96
     last_write_checksum: sha1:5cb58852d393eb6cc504b45d8b238fc2f3eecd2a
-    pristine_git_object: 299346f8aaa8ccddcbf7fd083389b74346ef2d4f
+    pristine_git_object: 8b74f9e7cdea83a5622df2c3b79debe3c4427288
   docs/models/conversationstreamrequestagentversion.md:
     id: e99ccc842929
     last_write_checksum: sha1:0ba5fca217681cdc5e08e0d82db67884bed076a6
@@ -613,29 +569,69 @@ trackedFiles:
     last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50
     pristine_git_object: c98e194c1d204c3a5d4234f0553712a7025d7f85
   docs/models/conversationstreamrequesttool.md:
+    id: 71df6212ff44
     last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc
+    pristine_git_object: 0f75f82b38f224340bed468ceecfe622066740ba
   docs/models/conversationusageinfo.md:
     id: 57ef89d3ab83
     last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225
     pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0
+  docs/models/createfinetuningjobresponse.md:
+    id: a9d31306296c
+    last_write_checksum: sha1:a15ccee66983fcc23321f966440d02fab4463178
+    pristine_git_object: f82cd793b466b0028b586781d36c690c0e5f97cd
+  docs/models/createorupdateagentaliasrequest.md:
+    id: be33079aa124
+    last_write_checksum: sha1:84cb72c549ee74c44dcf00b3f6a100060e322295
+    pristine_git_object: af2591ebb584965f5110ed987993f3a72b513255
+  docs/models/deleteagentaliasrequest.md:
+    id: c116b5c42b1b
+    last_write_checksum: sha1:51e1544cc867389120a2d1fbb4780c855690841e
+    pristine_git_object: 17812ec4a03b452a2d31950cc5a9e87a8f6d79f7
+  docs/models/deleteagentrequest.md:
+    id: 6411b6df1c85
+    last_write_checksum: sha1:1157d4717b75be91744bd7464c042e367faa4b71
+    pristine_git_object: 0aaacae471dd81ddc5ce4808abdd2b5653503ff6
+  docs/models/deleteconversationrequest.md:
+    id: 7247871c454c
+    last_write_checksum: sha1:a43ed3e32630fbb41921fa413ab2a26a914e425e
+    pristine_git_object: 39d9e5dfd52d9df1d1da7093761b65e0d12a0b40
+  docs/models/deletedocumentrequest.md:
+    id: 898eebfc019e
+    last_write_checksum: sha1:f06a13be4484048cf15c21d46eb2d107057b39db
+    pristine_git_object: eb060099f1b078fd084551338b51ee6677e8d235
   docs/models/deletefileout.md:
     id: c7b84242a45c
     last_write_checksum: sha1:f2b039ab88fc83ec5dd765cab8e2ed8cce7e417d
     pristine_git_object: 4709cc4958d008dc24430deb597f801b91c6957f
+  docs/models/deletefilerequest.md:
+    id: ca151d3da83a
+    last_write_checksum: sha1:ec50f13b099a6ef28d7965f7c8721ce1f505f7d2
+    pristine_git_object: bceae901954471a8667a3a61e66da6361ef50190
+  docs/models/deletelibraryaccessrequest.md:
+    id: ca39ae894c1f
+    last_write_checksum: sha1:41b7cd5c2e4616d3edefeb271dd7089fa04bd67d
+    pristine_git_object: c7034b98c30234a0a8cb368d84d9b287690027de
+  docs/models/deletelibraryrequest.md:
+    id: 4be1af37ab41
+    last_write_checksum: sha1:2769939a702c26be619f6c455cd48365b64110cc
+    pristine_git_object: c229ad73b2a7c39dab0ccdfa29e1f0475f0cdc7b
   docs/models/deletemodelout.md:
     id: 5643e76768d5
     last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269
     pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd
-  docs/models/deletemodelv1modelsmodeliddeleterequest.md:
-    id: c838cee0f093
-    last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99
-    pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4
+  docs/models/deletemodelrequest.md:
+    id: 22c414d48ee4
+    last_write_checksum: sha1:a60f549577b3461cb7552ad2080a34ad389f8579
+    pristine_git_object: d80103f1610668292589b6d7b861de814c17afda
   docs/models/deltamessage.md:
     id: 6c5ed6b60968
     last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74
-    pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9
+    pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952
   docs/models/deltamessagecontent.md:
+    id: 7307bedc8733
     last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e
+    pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2
   docs/models/document.md:
     id: cd1d2a444370
     last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52
@@ -643,7 +639,7 @@ trackedFiles:
   docs/models/documentlibrarytool.md:
     id: 68083b0ef8f3
     last_write_checksum: sha1:470b969fa4983c0e7ad3d513b4b7a4fa8d5f0f41
-    pristine_git_object: 82315f32b920d32741b2e53bc10e411f74a85602
+    pristine_git_object: 1695bad40cb0a1eb269e4ee12c6a81cbf0c7749a
   docs/models/documentout.md:
     id: a69fd1f47711
     last_write_checksum: sha1:ed446078e7194a0e44e21ab1af958d6a83597edb
@@ -657,7 +653,9 @@ trackedFiles:
     last_write_checksum: sha1:e0faccd04229204968dbc4e8131ee72f81288182
     pristine_git_object: 0993886d56868aba6844824f0e0fdf1bdb9d74f6
   docs/models/documentupload.md:
+    id: 7ff809a25eb0
     last_write_checksum: sha1:aea0f81009be09b153019abbc01b2918a1ecc1f9
+    pristine_git_object: 4e58a475f1776431c9c27a0fcdd00dd96257801f
   docs/models/documenturlchunk.md:
     id: 48437d297408
     last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54
@@ -666,6 +664,10 @@ trackedFiles:
     id: a3574c91f539
     last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c
     pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef
+  docs/models/downloadfilerequest.md:
+    id: 5acd7aafd454
+    last_write_checksum: sha1:5d7056818ddc5860e43699917496ded68b91ddfa
+    pristine_git_object: 3f4dc6ccc6d1c67396fe97197067c5421d8dc2d5
   docs/models/embeddingdtype.md:
     id: 22786e732e28
     last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71
@@ -695,7 +697,9 @@ trackedFiles:
     last_write_checksum: sha1:01c3c10e737bcd58be70b437f7ee74632972a983
     pristine_git_object: 7c040b382d4c1b6bc63f582566d938be75a5f954
   docs/models/entry.md:
+    id: da9a99ab48ab
     last_write_checksum: sha1:4971db390327db09f88feff5d2b8a0b1e6c5b933
+    pristine_git_object: d934b6774b25713afe923154d7709755426ec2cf
   docs/models/eventout.md:
     id: 9960732c3718
     last_write_checksum: sha1:dbc23814b2e54ded4aa014d63510b3a2a3259329
@@ -712,26 +716,6 @@ trackedFiles:
     id: ed6216584490
     last_write_checksum: sha1:02767595f85228f7bfcf359f8384b8263580d53a
     pristine_git_object: 14cab13ee191ae60e2c5e1e336d0a5abc13f778b
-  docs/models/filesapiroutesdeletefilerequest.md:
-    id: 7fdf9a97320b
-    last_write_checksum: sha1:411e38d0e08a499049796d1557f79d669fc65107
-    pristine_git_object: 1b02c2dbb7b3ced86ddb49c2323d1d88732b480c
-  docs/models/filesapiroutesdownloadfilerequest.md:
-    id: b9c13bb26345
-    last_write_checksum: sha1:1f41dad5ba9bd63881de04d24ef49a0650d30421
-    pristine_git_object: 8b28cb0e5c60ac9676656624eb3c2c6fdc8a3e88
-  docs/models/filesapiroutesgetsignedurlrequest.md:
-    id: 08f3772db370
-    last_write_checksum: sha1:26aa0140444ccef7307ef6f236932032e4784e8f
-    pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1
-  docs/models/filesapirouteslistfilesrequest.md:
-    id: 04bdf7c654bd
-    last_write_checksum: sha1:0a99755150c2ded8e5d59a96527021d29326b980
-    pristine_git_object: 57d11722f1dba2640df97c22be2a91317c240608
-  docs/models/filesapiroutesretrievefilerequest.md:
-    id: 2783bfd9c4b9
-    last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab
-    pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b
   docs/models/fileschema.md:
     id: 9a05a660399d
     last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197
@@ -779,7 +763,7 @@ trackedFiles:
   docs/models/ftmodelcard.md:
     id: 15ed6f94deea
     last_write_checksum: sha1:1c560ceaaacc1d109b2997c36de03192dfcda941
-    pristine_git_object: 35032775db8ae6f4c6fbac309edacd27ee7868af
+    pristine_git_object: 409f0526316a621b30dfbe45126c6b232e01fad4
   docs/models/function.md:
     id: 416a80fba031
     last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511
@@ -807,7 +791,7 @@ trackedFiles:
   docs/models/functioncallevent.md:
     id: cc9f2e603464
     last_write_checksum: sha1:942d1bed0778ba4738993fcdbefe080934b641d5
-    pristine_git_object: c25679a5d89745c1e186cdeb72fda490b2f45af2
+    pristine_git_object: f406206086afa37cbc59aa551ac17a4814dddf7e
   docs/models/functionname.md:
     id: 4b3bd62c0f26
     last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb
@@ -827,15 +811,79 @@ trackedFiles:
   docs/models/functiontool.md:
     id: 5fb499088cdf
     last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d
-    pristine_git_object: 8c42459304100777cf85416a5c3a984bc0e7a7ca
+    pristine_git_object: 0226b7045c9d82186e1111bb2025e96a4de90bd6
+  docs/models/getagentagentversion.md:
+    id: 825de6d2614f
+    last_write_checksum: sha1:d99f384ff5ee73e68fa7f8581d7622068b5b7498
+    pristine_git_object: 6d7b3f1d15994c24a5b992d1908fe8126da0e3ea
+  docs/models/getagentrequest.md:
+    id: 743f3a4630be
+    last_write_checksum: sha1:4d17d6b7b15e39520414085fc977be881e4e0a85
+    pristine_git_object: 3f729dff0f7fc773f83593222da0dd0618b3e8b3
+  docs/models/getagentversionrequest.md:
+    id: 4bf5feb4494a
+    last_write_checksum: sha1:d26546c2fdd78e0f52e2a2c50736b412ce814f6e
+    pristine_git_object: c98fee9d141f556520e16189e90234063e6861eb
+  docs/models/getbatchjobrequest.md:
+    id: 0c3a5debd663
+    last_write_checksum: sha1:c186bbc6b04e1ed2db32f68fb22cb7eff4c1a90c
+    pristine_git_object: f3c67eb4a898a21e8a78c3340171458dcbd21d58
+  docs/models/getconversationhistoryrequest.md:
+    id: 27de0e44ed80
+    last_write_checksum: sha1:d89318332c87b5fa3bba22a52e209bdd5702b3f0
+    pristine_git_object: fc90282bd9308a7531c3c532234fd332a223f243
+  docs/models/getconversationmessagesrequest.md:
+    id: 82bf9b5c275b
+    last_write_checksum: sha1:cdbb0371c7a35e84f7938d28719acd843ebc15ce
+    pristine_git_object: fd037fea6c09d97bfb74332838a2b2760de4dccb
+  docs/models/getconversationrequest.md:
+    id: ad6c903380f6
+    last_write_checksum: sha1:ee93a91d5daa01fc937dd09589b268bb2e42868a
+    pristine_git_object: 8a66a8b032cb67503c0f6b95c98e0a40b13d16ec
+  docs/models/getdocumentextractedtextsignedurlrequest.md:
+    id: d47f32212cf5
+    last_write_checksum: sha1:7d695630988d5ab3773aabfe17c3fa9177d7e9c9
+    pristine_git_object: ff703802ddfe0e36768daf87f4c5626028642370
+  docs/models/getdocumentrequest.md:
+    id: 4208f9b571b3
+    last_write_checksum: sha1:45f6807e2f7cd4c30f95304172cb556896571b76
+    pristine_git_object: 29f62127b09511f14a065b9b6f6068e63643ab7c
+  docs/models/getdocumentsignedurlrequest.md:
+    id: 734960a10101
+    last_write_checksum: sha1:04debc445e51e7d0f922bfe7873d639a844c17b4
+    pristine_git_object: 72a179c086e38650afd81165575c7926d9566f69
+  docs/models/getdocumentstatusrequest.md:
+    id: d0a69468ea34
+    last_write_checksum: sha1:a8d91948737e4fa392221ec18970d27af90c203e
+    pristine_git_object: 3557d7738be21206061ef5806b79118432b33f26
+  docs/models/getdocumenttextcontentrequest.md:
+    id: 6baa6485417b
+    last_write_checksum: sha1:5b47d1d8d5675e4b9f477c8034ef64afc912cd06
+    pristine_git_object: 8593340139f28b44dfed455849198f5d5a457643
+  docs/models/getfilesignedurlrequest.md:
+    id: c7b1953174af
+    last_write_checksum: sha1:d558115d1611827f461cc6a9f373885271c7a51d
+    pristine_git_object: 0be3b2888b0680d5a5fac0057cedc279d112ddb8
+  docs/models/getfinetuningjobrequest.md:
+    id: c18796fe85f3
+    last_write_checksum: sha1:8166520e2d657098131fd77c81a86099ed4d3486
+    pristine_git_object: f20cb2148330c7078c6e93f55aa99f1b09086eaf
+  docs/models/getfinetuningjobresponse.md:
+    id: 8f50d4a61ae1
+    last_write_checksum: sha1:509e8d190b43b5a4a3e0ae7d97bf2b4262fcd1f8
+    pristine_git_object: 1b0568dd8019879ec2e1d0ff039296f600415e21
+  docs/models/getlibraryrequest.md:
+    id: 9c9a9e6c4f03
+    last_write_checksum: sha1:822494a821ee3a51a477f305c140ed39cd6465fc
+    pristine_git_object: 2a3acf50a6300ea3bcbc3b8432fe28cbef82c620
   docs/models/githubrepositoryin.md:
     id: b42209ef8423
     last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23
-    pristine_git_object: 1584152ba934756793d5228d5691c07d3256c7b8
+    pristine_git_object: 241cf584d5e2425e46e065f47a18bea50fa624db
   docs/models/githubrepositoryout.md:
     id: 0ca86e122722
     last_write_checksum: sha1:0e3999cef8a745ae24ac36907b3431bc5103ea6f
-    pristine_git_object: 03f0b2661e46b48489ede1208d9c38c4324b2b35
+    pristine_git_object: fe38393a0cc2eb5c0b0c4690cb0c4e5e3ec41df8
   docs/models/httpvalidationerror.md:
     id: a211c095f2ac
     last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e
@@ -847,7 +895,7 @@ trackedFiles:
   docs/models/imagegenerationtool.md:
     id: d5deb6b06d28
     last_write_checksum: sha1:b3decee8fe7a824401f9afbd3544a69ccde4ef8e
-    pristine_git_object: b8fc9cf40c8cb010231837ffe3d66cb3762dd666
+    pristine_git_object: 0c8de72cdd7149217010ae5d02777d1c5dd9896c
   docs/models/imageurl.md:
     id: e75dd23cec1d
     last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63
@@ -855,13 +903,15 @@ trackedFiles:
   docs/models/imageurlchunk.md:
     id: 4407097bfff3
     last_write_checksum: sha1:73e14a0beccfc9465ee6d2990462e609903f5cd5
-    pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789
+    pristine_git_object: 43078c7849fb3e808c2eaeaa5a3caeab2619d700
   docs/models/imageurlchunktype.md:
     id: b9af2db9ff60
     last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85
     pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e
   docs/models/imageurlunion.md:
+    id: 9d3c691a9db0
     last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15
+    pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2
   docs/models/inputentries.md:
     id: a5c647d5ad90
     last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53
@@ -871,179 +921,53 @@ trackedFiles:
     last_write_checksum: sha1:19d8da9624030a47a3285276c5893a0fc7609435
     pristine_git_object: 0f62a7ce8e965d0879507e98f808b9eb254282a6
   docs/models/inputsmessage.md:
+    id: 174dcada287d
     last_write_checksum: sha1:92a95c1757e33603d1aa9ed6c9912d1c551d9974
+    pristine_git_object: e3543fb4f9fff679b25f7f803eb2e8dabd56368f
   docs/models/instructrequest.md:
     id: a0034d7349a2
     last_write_checksum: sha1:34a81411110cbb7a099c45e482f5d1702ae48fd3
-    pristine_git_object: 9500cb588b5d27d934b04cc5fa0be26a270f6d82
+    pristine_git_object: 5f0cdfff135fb72d3b1a81999a30b720c044e3d4
   docs/models/instructrequestinputs.md:
     id: 2a677880e32a
     last_write_checksum: sha1:64bcc6371d70446da60f167682504568d7f2618c
-    pristine_git_object: 4caa028f85be2324966e61321c917cbd0c65de01
+    pristine_git_object: 931ae5e47df2d2850e3ef6740e2b89e1e0138297
   docs/models/instructrequestmessage.md:
+    id: 380503708a09
     last_write_checksum: sha1:551b5d6dd3ba0b39cad32478213a9eb7549f0023
+    pristine_git_object: 57ed27ab3b1430514797dd0073bc87b31e5e3815
   docs/models/jobin.md:
     id: 1b7b37214fa8
-    last_write_checksum: sha1:16436f5d3222b89d604cf326bde749d9e6f9da39
-    pristine_git_object: b96517705cea7b9efd266f146080ad1aed3cc8cb
+    last_write_checksum: sha1:0a241378cf3791c5c3fa733f30d45c07ef841448
+    pristine_git_object: 62da90727898dd84f547c436c17fefa788e4f0d6
   docs/models/jobinintegration.md:
+    id: 200c505fa67f
     last_write_checksum: sha1:c9887897357e01e6e228b48d6bf0c3fb4edd29f7
+    pristine_git_object: 103820e7ec55769227610c385addbecfcd075cae
   docs/models/jobinrepository.md:
+    id: 9ab1d5469c10
     last_write_checksum: sha1:1773f59546b94688d0be16d3f5f014cd86f5b1d7
+    pristine_git_object: e873ae63f359d6ac4aca03b058a7c25fbbf2ba32
   docs/models/jobmetadataout.md:
     id: 30eb634fe247
     last_write_checksum: sha1:46d54b6f6004a6e571afd5207db5170dfbce7081
     pristine_git_object: 6218a161b71abbb35eb4ca6e3ce664226983efc2
-  docs/models/jobsapiroutesbatchcancelbatchjobrequest.md:
-    id: 798cb1ca1385
-    last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68
-    pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62
-  docs/models/jobsapiroutesbatchgetbatchjobrequest.md:
-    id: e83a7ec84f8a
-    last_write_checksum: sha1:d661875832b4b9d5f545262216c9fcb9a77c8cd0
-    pristine_git_object: 8c259bea9bef11f779fd609f1212565d574457e2
-  docs/models/jobsapiroutesbatchgetbatchjobsrequest.md:
-    id: 5b9c44ad4d31
-    last_write_checksum: sha1:8e28b08c86355b097836e55559fda85487000092
-    pristine_git_object: b062b8731ca7c99af968be2e65cca6aa5f122b37
-  docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md:
-    id: 8eb8c127091e
-    last_write_checksum: sha1:2b93a6bed5743461bb03c8337fb25dfc5a15522e
-    pristine_git_object: f9700df50b8f512c4139c1830aba18989d022b8e
-  docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md:
-    id: deff83b39b78
-    last_write_checksum: sha1:dac8d8f2e95aed2db9b46711e6e80816881d5d14
-    pristine_git_object: 883cbac685563d2e0959b63638f6b967ebdf1ee9
-  docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md:
-    id: c45757ba1ed9
-    last_write_checksum: sha1:52d4f945aff24c03627111d0e7c73cbbba60129f
-    pristine_git_object: 1b331662b17cd24c22e88b01bf00d042cb658516
-  docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md:
-    id: 8aa8030f26d7
-    last_write_checksum: sha1:619bb7677fa549f5089fde98f3a00ab1d939f80d
-    pristine_git_object: eeddc3cdfdd975cdb69fbfcd306e9445010eb82f
-  docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md:
-    id: a9b75762e534
-    last_write_checksum: sha1:8f1395447928e089c88dce8c0ced1030ec5f0eba
-    pristine_git_object: fde19800303a901149bf39c5330ef8c4da87df62
-  docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md:
-    id: c0b31f4fc621
-    last_write_checksum: sha1:6f70f5cabb62e2df7c1e4086f7a8b100143cc2aa
-    pristine_git_object: e0d2e3610ce460d834c2d07d9a34b09f8257217b
-  docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md:
-    id: 52078f097503
-    last_write_checksum: sha1:fc134fdc7e229b8df373b77096c8299c214171a7
-    pristine_git_object: 3dca3cd85245e0956b557fc5d6ae6c5e265df38d
-  docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md:
-    last_write_checksum: sha1:bbc08ca53c2da180b96ed0347cf4954410c79311
-  docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md:
-    id: b4e2b814d8c3
-    last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e
-    pristine_git_object: 4429fe480ab9486de98940a119ac63f40045313b
-  docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md:
-    id: cfd848845787
-    last_write_checksum: sha1:b3a64f467ab1c16427ef77d3acb0749ab155e213
-    pristine_git_object: 64f4cca608f8e505f9eeaac623955200dd5b9553
-  docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md:
-    id: 75b5dd1bcbaa
-    last_write_checksum: sha1:dd30e7ff8748d26497458f3398c0547113dc058f
-    pristine_git_object: 95c1734daa7164bedeeb1fa58dd792939f25bc17
-  docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md:
-    id: 60bd2e28993a
-    last_write_checksum: sha1:7ff770c3d0148a4818957b279875bbe5b1ecfc62
-    pristine_git_object: 6d93832e68739e465de7c61993b8bcfa1468bafc
-  docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md:
-    id: c265a30fd4cf
-    last_write_checksum: sha1:e1a739e755b4e573f592743cd34116da97a67450
-    pristine_git_object: 54f4c3981978e1ac4bdf42d5b746b73a62d13162
   docs/models/jobsout.md:
     id: cbe31f43047d
-    last_write_checksum: sha1:73e1ce0ff11741c22dc00d768055ad603034147c
-    pristine_git_object: 977013f7a679dd89fb48c4a95b266a9ea5f3f7cf
+    last_write_checksum: sha1:4bd9ffbd2e5a286090167c795b9c3970e3c7d0a5
+    pristine_git_object: 69f8342ac6f02a6e60d05b6f5b3cd892964fd3d7
  docs/models/jobsoutdata.md:
     id: 809574cac86a
     last_write_checksum: sha1:06455044d314c4edbd1ce4833d551c10918f0a3e
     pristine_git_object: 28cec31117416b79eb8688d84b47b157974574cc
-  docs/models/jobsoutobject.md:
-    id: 1c99619e2435
-    last_write_checksum: sha1:cffbcfb8673e12feb8e22fd397bf68c8745c76bb
-    pristine_git_object: f6c8a2c3079003a885ee9bdfc73cf7c7c7d8eded
   docs/models/jsonschema.md:
     id: a6b15ed6fac8
     last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f
     pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269
   docs/models/legacyjobmetadataout.md:
     id: b3b8c262f61a
-    last_write_checksum: sha1:bc611bf233bd5b224b1367c6b800de6c3b589b38
-    pristine_git_object: 53a45485b70017e729709359407d6c9f3e0fbe35
-  docs/models/legacyjobmetadataoutobject.md:
-    id: 5bafaafb6137
-    last_write_checksum: sha1:30e5942a6d0c9fde35d29cd9d87a4304b0e4fa26
-    pristine_git_object: 9873ada894f79647c05e386521c6b4208d740524
-  docs/models/librariesdeletev1request.md:
-    id: c0c3b2e1aabc
-    last_write_checksum: sha1:bef84f8851b06d2d914b605f11109de1850d0294
-    pristine_git_object: 68d7e54369ce75422bf8b0ff16cada1c0ae2b05c
-  docs/models/librariesdocumentsdeletev1request.md:
-    id: 9d557bd7d1cc
-    last_write_checksum: sha1:1b580b657559356886915ee5579b90a03db19337
-    pristine_git_object: efccdb1bbc36cf644ed2d1716cbd202e6d6bf6c5
-  docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md:
-    id: 27ad38ce4cb1
-    last_write_checksum: sha1:b35ad610330232b395b5f87cc15f6ae270de6816
-    pristine_git_object: 14ca66f72693f1df05eb93e0cca45f440b62d282
-  docs/models/librariesdocumentsgetsignedurlv1request.md:
-    id: 4498715b6cfb
-    last_write_checksum: sha1:31f78079e31e070d080c99555cd2d85318fc4610
-    pristine_git_object: 7c08c180d59a8e8475fea89424b8b2021d51385f
-  docs/models/librariesdocumentsgetstatusv1request.md:
-    id: c2219d3a3738
-    last_write_checksum: sha1:44e79df94cf2686e83d7a2e793140a6a7b3a1c05
-    pristine_git_object: e6d41875966348fd9e770d06c8099e48f0e64b5d
-  docs/models/librariesdocumentsgettextcontentv1request.md:
-    id: 850dfa465952
-    last_write_checksum: sha1:4a1212e111525f4265d2924ce52f9c13d2787d4d
-    pristine_git_object: 2f58a4460ccdad531391318c62191e76c1ec22ac
-  docs/models/librariesdocumentsgetv1request.md:
-    id: cdd0df2f7e9d
-    last_write_checksum: sha1:36e5ef39552159044ecd28d20ee0792ea5bcadef
-    pristine_git_object: 6febc058425bb38857c391ee4c40d600858e6058
-  docs/models/librariesdocumentslistv1request.md:
-    id: 7b5756e50d64
-    last_write_checksum: sha1:2605b7972a3d7b4f73ab8052be4bf740f44f6f6f
-    pristine_git_object: 44f6300115853053214639982516a60b3268e778
-  docs/models/librariesdocumentsreprocessv1request.md:
-    id: 1b8bf57b3f0a
-    last_write_checksum: sha1:8528785c1b4ae18d6ec6f261d29d5daac0d420a3
-    pristine_git_object: 196ba17b749ce9efc1c30189864e474896814f85
-  docs/models/librariesdocumentsupdatev1request.md:
-    id: b9147b1c0e38
-    last_write_checksum: sha1:45b2cc114886b300e3b996a8b71241ac5c7260a3
-    pristine_git_object: 2f18b014af4577a0ae862dfeea599d5f700005cb
-  docs/models/librariesdocumentsuploadv1request.md:
-    id: 89a89d889c72
-    last_write_checksum: sha1:32294a87d8a0b173b4d6f12b607a1bb3da765776
-    pristine_git_object: 7c91ca9b92839be8ab1efb4428cc8d7a78d57e1e
-  docs/models/librariesgetv1request.md:
-    id: f47ad71ec7ca
-    last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac
-    pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816
-  docs/models/librariessharecreatev1request.md:
-    id: 99e7bb8f7fed
-    last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088
-    pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669
-  docs/models/librariessharedeletev1request.md:
-    id: bc8adba83f39
-    last_write_checksum: sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf
-    pristine_git_object: 850e22ab79863ba544f453138322c0eb5bf544cd
-  docs/models/librariessharelistv1request.md:
-    id: 86e6f08565e2
-    last_write_checksum: sha1:6f2ffff66fa5fb141d930bca7bb56e978d62b4a5
-    pristine_git_object: 98bf6d17ab013c1dd3f0ab18c37bbfc1a63f1b76
-  docs/models/librariesupdatev1request.md:
-    id: f7e51b528406
-    last_write_checksum: sha1:cec4aa232c78ca2bd862aee3d5fb3bcc2ad9dc05
-    pristine_git_object: a68ef7a8f52ee4a606cb88d0a3f96de8c2fbccb8
+    last_write_checksum: sha1:d8c4e7525e2dc2f4d29bfeb6cadc648fab1c62c7
+    pristine_git_object: 8a712140fbf3c36f4bd9686e135b70d8688aa9c1
   docs/models/libraryin.md:
     id: a08170e6397c
     last_write_checksum: sha1:2c996ecf1ae5d9e8df702a79741b72b3571eb6ef
@@ -1056,14 +980,58 @@ trackedFiles:
     id: 2e8b6d91ded2
     last_write_checksum: sha1:d71053b44725147265871be445217e3e1a0e5ede
     pristine_git_object: ebf46d57de6bad7022a3e8cb8eaf88728bbbe888
+  docs/models/listagentaliasesrequest.md:
+    id: 495659b2d40a
+    last_write_checksum: sha1:637e7e0e8deadcf2e77cc9469727010f90f0ad79
+    pristine_git_object: b3570cb80d484dadaf2a138c70bbb477746ba416
+  docs/models/listagentsrequest.md:
+    id: aeb9bbc163f5
+    last_write_checksum: sha1:86c5f5068061b79d2e582e4dd9a8b0ed4c84cbcf
+    pristine_git_object: 79aec3ea6e3506797fc96a7ca9d7393543270866
+  docs/models/listagentversionsrequest.md:
+    id: 3270f6dd4107
+    last_write_checksum: sha1:14ffb20c5c48cca371ed27f6a6a8b565cd4a5565
+    pristine_git_object: ba8ddaa5cb4c94623b29a1f635f38a04cc0ff497
+  docs/models/listbatchjobsrequest.md:
+    id: e2a0b1528191
+    last_write_checksum: sha1:01a587ec7cc6e183d47e106eb809e7c1e9e79e39
+    pristine_git_object: 19981b2425254058bd24b218d1f7881fc3635c89
+  docs/models/listconversationsrequest.md:
+    id: 6c0961051703
+    last_write_checksum: sha1:453eb480cd48330f857b4c80210b6753a750348d
+    pristine_git_object: d99b420834b17f3f5b7fac630af7a7b0d2db341d
+  docs/models/listconversationsresponse.md:
+    id: 65075f5cf00c
+    last_write_checksum: sha1:8478c55b156c09f2b714d2854030a04494b48f7c
+    pristine_git_object: 9d611c553b245657181c06d7f65acaa9d8128556
   docs/models/listdocumentout.md:
     id: 4bec19e96c34
     last_write_checksum: sha1:c0b3a6e3841f120c52b1d7718d7226a52fe1b6d6
     pristine_git_object: f14157b8db55c1201d9f7151742e9ddf0d191c16
+  docs/models/listdocumentsrequest.md:
+    id: 36c8a1116534
+    last_write_checksum: sha1:390849ce3d93a64c505b7b2f7cae411766a5e44b
+    pristine_git_object: 369e8edbe471dd5167ad1baf74ee5b00eb7d5043
   docs/models/listfilesout.md:
     id: 98d4c59cc07e
     last_write_checksum: sha1:e76df31628984095f1123005009ddc4b59b1c2bc
     pristine_git_object: bcb1f13aa17f41dadb6af37541e929364e2d6cec
+  docs/models/listfilesrequest.md:
+    id: 70edaf3759f0
+    last_write_checksum: sha1:686edbd5134dfe60cfd98221ec78d296a8429d28
+    pristine_git_object: 2d76a76b011603e3a7c4b4932ef4b26def1cb792
+  docs/models/listfinetuningjobsrequest.md:
+    id: 41878563fe80
+    last_write_checksum: sha1:103cd0d3c5334ea60a6c6e1c2585bf9bd493c78f
+    pristine_git_object: 3a04fc709c2a12cc4f414701efcaec4584b7d6df
+  docs/models/listfinetuningjobsstatus.md:
+    id: 1d6d54dc70ea
+    last_write_checksum: sha1:c4f69e2b2b5aac719281d264722f2cba6aa048a0
+    pristine_git_object: 07db9ae5d87b7192ada4843d4fe0d3e8573794c6
+  docs/models/listlibraryaccessesrequest.md:
+    id: 0b387463f914
+    last_write_checksum: sha1:2912e1fc3ee179f01fde7a21501e2501debecc2c
+    pristine_git_object: d98bcda22bbb2540a525f2ce1516a637446b0a0f
   docs/models/listlibraryout.md:
     id: ea34f8548bd6
     last_write_checksum: sha1:cec920357bc48bea286c05d16c480a9a9369b459
@@ -1087,13 +1055,15 @@ trackedFiles:
   docs/models/messageinputentry.md:
     id: eb74af2b9341
     last_write_checksum: sha1:07124339ecb87e31df5f0e2f887e23209dd269af
-    pristine_git_object: d55eb8769c3963518fcbc910d2e1398b6f46fd87
+    pristine_git_object: 52183a32330b3e0bf91a1bd5e541dfda12d3f1a0
   docs/models/messageinputentrycontent.md:
     id: 7e12c6be6913
     last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a
     pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e
   docs/models/messageinputentryobject.md:
+    id: 9a1d0d31f357
     last_write_checksum: sha1:7746753005fda37834a73e62bf459eacb740ba5b
+    pristine_git_object: 6bdd62e27d7353dbb7d521ad02bde358496ab108
   docs/models/messageinputentryrole.md:
     id: 2497d07a793d
     last_write_checksum: sha1:a41eb58f853f25489d8c00f7a9595f443dcca2e6
@@ -1129,7 +1099,7 @@ trackedFiles:
   docs/models/messageoutputevent.md:
     id: b690693fa806
     last_write_checksum: sha1:d6538a4b5d5721c09bc196f3e9523ed45dafbea7
-    pristine_git_object: 92c1c61587e34f6e143263e35c33acc9332870d6
+    pristine_git_object: b0fa1a2d369c89ec75f43c6b31ff52b0d80d9b1c
   docs/models/messageoutputeventcontent.md:
     id: cecea075d823
     last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914
@@ -1153,19 +1123,23 @@ trackedFiles:
   docs/models/modelconversation.md:
     id: 497521ee9bd6
     last_write_checksum: sha1:440c9e7c306f20bd4f4b27ab0cf770d3bf8762e2
-    pristine_git_object: 1a03ef7d1dd9e1d6b51f0f9391c46feb5cd822a8
+    pristine_git_object: 813e1f3a79ad14eae55bbb1b96598d6260904d9d
   docs/models/modelconversationobject.md:
     id: 4c5699d157a9
     last_write_checksum: sha1:8e2e82e1fa4cb97f8c7a8a129b3cc9cd651e4055
     pristine_git_object: ead1fa26f5d9641a198a14b43a0f5689456e5821
   docs/models/modelconversationtool.md:
+    id: 2dd28167bc36
     last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f
+    pristine_git_object: 8723556753d077969bc665a423c057ae4ceaa0d2
   docs/models/modellist.md:
     id: ce07fd9ce413
     last_write_checksum: sha1:b4c22b5eff4478ffa5717bd5af92ca79f4a90b01
-    pristine_git_object: 760882c6c5b442b09bbc91f910f960138d6a00c8
+    pristine_git_object: 85b20be7376f80cf169c25b3c7117079cd4c2828
   docs/models/modellistdata.md:
+    id: e2eb639c646f
     last_write_checksum: sha1:7394ba5645f990163c4d777ebbfc71f24c5d3a74
+    pristine_git_object: b44e84a00d0c54f8df78650d45de0a409c901048
   docs/models/moderationobject.md:
     id: 4e84364835f5
     last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e
@@ -1175,7 +1149,9 @@ trackedFiles:
     last_write_checksum: sha1:18e8f4b4b97cb444824fcdce8f518c4e5a27c372
     pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20
   docs/models/multipartbodyparams.md:
+    id: f5be2d861921
     last_write_checksum: sha1:34e68e3795c7987138abd152177fa07198d2f6f6
+    pristine_git_object: f14b95737fde09a120b35e2f922568ca31825bd5
   docs/models/ocrimageobject.md:
     id: b72f3c5853b2
     last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504
@@ -1204,8 +1180,14 @@ trackedFiles:
     id: 419abbb8353a
     last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e
     pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c
+  docs/models/orderby.md:
+    id: 9e749ed80f72
+    last_write_checksum: sha1:6ec002e3e59f37002ccb14e347b790ca4daef773
+    pristine_git_object: bba50df10855a8d6acdf4b061ec2ffeb0279fd7f
   docs/models/output.md:
+    id: 376633b966cd
     last_write_checksum: sha1:600058f0b0f589d8688e9589762c45a0dd18cc9b
+    pristine_git_object: d0ee0db93f56c40f6684fcfdb5873aba586bc876
   docs/models/outputcontentchunks.md:
     id: f7e175c8e002
     last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24
@@ -1229,9 +1211,11 @@ trackedFiles:
   docs/models/realtimetranscriptionerrordetail.md:
     id: ea137b1051f1
     last_write_checksum: sha1:7e1d18760939d6087cda5fba54553141f8a78d1e
-    pristine_git_object: 96420ada2ac94fca24a36ddacae9c876e14ccb7a
+    pristine_git_object: 5b34755dc67359bb884d5c2387608686ee527470
   docs/models/realtimetranscriptionerrordetailmessage.md:
+    id: d25137243bef
     last_write_checksum: sha1:f8c3a4984d647d64e8ea4e1e42654265ffe46b0f
+    pristine_git_object: da3764ef56337bdc773eaf8e9aa747cbd1b407e2
   docs/models/realtimetranscriptionsession.md:
     id: aeb0a0f87d6f
     last_write_checksum: sha1:c3aa4050d9cc1b73df8496760f1c723d16183f3a
@@ -1252,20 +1236,26 @@ trackedFiles:
     id: 0944b80ea9c8
     last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471
     pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257
+  docs/models/reprocessdocumentrequest.md:
+    id: 3c713aad474b
+    last_write_checksum: sha1:100b194196051470a2ae75cc2f707afec0c8d161
+    pristine_git_object: cf3982a8cd76e4b2c8429acede0a12a044cbe2ca
   docs/models/requestsource.md:
     id: 8857ab6025c4
     last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf
     pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b
   docs/models/response.md:
+    id: 583c991c7a30
     last_write_checksum: sha1:f4a3ec06ff53cd1cbdf892ff7152d39fa1746821
+    pristine_git_object: 3512b7a8f9fdfcaaed9a6db06ef4266629d9fa89
   docs/models/responsedoneevent.md:
     id: 38c38c3c065b
     last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f
-    pristine_git_object: ec25bd6d364b0b4959b11a6d1595bdb57cba6564
+    pristine_git_object: 63d4cc06493e1ca12cf0e8ef800acfc0bdc9a02d
   docs/models/responseerrorevent.md:
     id: 3e868aa9958d
     last_write_checksum: sha1:4711077bf182e4f3406dd12357da49d37d172b4c
-    pristine_git_object: 2ea6a2e0ec412ae484f60fa1d09d02e776499bb9
+    pristine_git_object: 4309bdadc323918900cc4ca4fddb18788361d648
   docs/models/responseformat.md:
     id: 50a1e4140614
     last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add
@@ -1275,21 +1265,37 @@ trackedFiles:
     last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4
     pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f
   docs/models/responseretrievemodelv1modelsmodelidget.md:
+    id: 6143ec73bdd6
     last_write_checksum: sha1:6bae62cbb18559065a53f0acdacb1f72f513467e
+    pristine_git_object: ffbc1473d39c8266bb6b05b37677c98ca1d10858
   docs/models/responsestartedevent.md:
     id: 88e3b9f0aa8d
     last_write_checksum: sha1:156f38bbe8278f9c03117135938e7cbdae3038b9
-    pristine_git_object: 481bd5bba67a524dbadf9f1570a28ae20ec9f642
+    pristine_git_object: e2f421af866690b34c2d9fa4595a63e9172a65f5
   docs/models/responsev1conversationsget.md:
+    id: 48d4a45780a9
     last_write_checksum: sha1:8e75db359f0d640a27498d20c2ea6d561c318d7e
+    pristine_git_object: 844c5d610a9a351532d12b1a73f6c660059da76b
+  docs/models/restartconversationrequest.md:
+    id: b85b069aa827
+    last_write_checksum: sha1:b7fb56a5561ab329f605d77795a610da8faaf561
+    pristine_git_object: f24f14e67e749da884363038ca72891449cd99da
+  docs/models/restartconversationstreamrequest.md:
+    id: 65df276279f0
+    last_write_checksum: sha1:907807c7e5969f82e70e743fddeb4c6f4278fc1a
+    pristine_git_object: daa661a9250701ad33241084d5033f73d75a9d6e
   docs/models/retrievefileout.md:
     id: 8e82ae08d9b5
     last_write_checksum: sha1:600d5ea4f75dab07fb1139112962affcf633a6c9
     pristine_git_object: 28f97dd25718833aaa42c361337e5e60488bcdc8
-  docs/models/retrievemodelv1modelsmodelidgetrequest.md:
-    id: ac567924689c
-    last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9
-    pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a
+  docs/models/retrievefilerequest.md:
+    id: eac92ea7ca45
+    last_write_checksum: sha1:c80772e3cfbe704385abe1b347d8e69d55bd9e00
+    pristine_git_object: 454b9665b8134876488eb32c57a9dc45f4d972de
+  docs/models/retrievemodelrequest.md:
+    id: 392008b3324b
+    last_write_checksum: sha1:b9aafe10f0cd838a0b6959ec8dde5850ce59c55d
+    pristine_git_object: 787c3dd1000cba873c787fd5b9dcbe3c793f2b11
   docs/models/sampletype.md:
     id: 0e09775cd9d3
     last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a
@@ -1322,10 +1328,18 @@ trackedFiles:
     id: 6a902241137c
     last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0
     pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3
+  docs/models/startfinetuningjobrequest.md:
+    id: 48fd313ae362
+    last_write_checksum: sha1:f645c1e3e3244729eaa31aabb4b3ec0454fb114f
+    pristine_git_object: 9df5aee8f527fea4f0c9b02a28af77a65765be48
+  docs/models/startfinetuningjobresponse.md:
+    id: 970045c710ff
+    last_write_checksum: sha1:78d230946abe19e928f286562ac589c7672c9854
+    pristine_git_object: dce84c5a7711cd655a624b6ba0540504a6ff75d7
   docs/models/systemmessage.md:
     id: fdb7963e1cdf
     last_write_checksum: sha1:561c3372391e093c890f477b3213c308ead50b81
-    pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13
+    pristine_git_object: dfb0cd0bd17aecbc1fe4b8410e78440f65038fef
   docs/models/systemmessagecontent.md:
     id: 94a56febaeda
     last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb
@@ -1381,7 +1395,7 @@ trackedFiles:
   docs/models/toolexecutiondeltaevent.md:
     id: f2fc876ef7c6
     last_write_checksum: sha1:ae1462a9b5cb56002b41f477ce262cb64ccf2f4e
-    pristine_git_object: 7bee6d831a92085a88c0772300bcad4ce8194edb
+    pristine_git_object: 7066f3485407707500e5006335279bfa37db8705
   docs/models/toolexecutiondeltaeventname.md:
     id: 93fd3a3b669d
     last_write_checksum: sha1:d5dcdb165c220209ee76d81938f2d9808c77d4fc
@@ -1389,7 +1403,7 @@ trackedFiles:
   docs/models/toolexecutiondoneevent.md:
     id: b604a4ca5876
     last_write_checksum: sha1:6b6975ded0b0495b6c56250d153186c7818b5958
-    pristine_git_object: 5898ea5eff103b99886789805d9113dfd8b01588
+    pristine_git_object: b2d81be3cfa3e1dd0d1a58ef5ad16556c5e953c7
  docs/models/toolexecutiondoneeventname.md:
     id: d19dc0060655
     last_write_checksum: sha1:aa5677087e6933699135a53f664f5b86bbae5ac6
@@ -1397,9 +1411,11 @@ trackedFiles:
   docs/models/toolexecutionentry.md:
     id: 75a7560ab96e
     last_write_checksum: sha1:fdaa9abd5417486100ffc7059fcfdc8532935ed3
-    pristine_git_object: 3678116df64ad398fef00bab39dd35c3fd5ee1f5
+    pristine_git_object: adf88fb1acec13bf8016eb42d6bdc5fd3bd279b5
   docs/models/toolexecutionentryname.md:
+    id: 86d537762559
     last_write_checksum: sha1:6c528cdfbb3f2f7dc41d11f57c86676f689b8845
+    pristine_git_object: fb762a5382d8b0e93dc2eb277f18adf810057c55
   docs/models/toolexecutionentryobject.md:
     id: af106f91001f
     last_write_checksum: sha1:6df075bee4e84edf9b57fcf62f27b22a4e7700f4
@@ -1411,7 +1427,7 @@ trackedFiles:
   docs/models/toolexecutionstartedevent.md:
     id: 37657383654d
     last_write_checksum: sha1:47126a25c2a93583038ff877b85fc9ae1dcef9f3
-    pristine_git_object: de81312bda08970cded88d1b3df23ebc1481ebf2
+    pristine_git_object: c41c7258779f15f1f0436ad890f4947d780bfa75
   docs/models/toolexecutionstartedeventname.md:
     id: be6b33417678
     last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364
@@ -1431,7 +1447,7 @@ trackedFiles:
   docs/models/toolmessage.md:
     id: 0553747c37a1
     last_write_checksum: sha1:f35fa287b94d2c1a9de46c2c479dadd5dca7144d
-    pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5
+    pristine_git_object: fa00d666d6d2baea0aac10fcdeff449eb73c9d39
   docs/models/toolmessagecontent.md:
     id: f0522d2d3c93
     last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee
@@ -1463,13 +1479,15 @@ trackedFiles:
   docs/models/transcriptionsegmentchunk.md:
     id: f09db8b2273e
     last_write_checksum: sha1:5387f2595d14f34b8af6182c34efac4874a98308
-    pristine_git_object: f620b96a75a0b9c6e015ae1f460dcccb80d113ee
+    pristine_git_object: 00a599ee8442f45ce4f529da18ad3e9486b12f9f
   docs/models/transcriptionsegmentchunktype.md:
+    id: 01bda77a53f8
     last_write_checksum: sha1:63d511c2bd93bd477f1b7aae52954b28838316d9
+    pristine_git_object: 2968fa26a2dd390b66974e6db57317616fb3b832
   docs/models/transcriptionstreamdone.md:
     id: 2253923d93cf
     last_write_checksum: sha1:2a1910d59be258af8dd733b8911e5a0431fab5a4
-    pristine_git_object: 9ecf7d9ca32410d92c93c62ead9674e097533ec3
+    pristine_git_object: bca69a2b02e069ce240342d76ac408aec67993a9
   docs/models/transcriptionstreamevents.md:
     id: d0f4eedfa2b6
     last_write_checksum: sha1:ec6b992049bd0337d57baab56603b1fa36a0a35b
@@ -1485,27 +1503,59 @@ trackedFiles:
   docs/models/transcriptionstreamlanguage.md:
     id: 5e9df200153c
     last_write_checksum: sha1:d5626a53dde8d6736bab75f35cee4d6666a6b795
-    pristine_git_object: e16c8fdce3f04ae688ddc18650b359d2dd5d6f6f
+    pristine_git_object: 63fcfbc63a65cdff4228601e8a46f9d003ec9210
   docs/models/transcriptionstreamsegmentdelta.md:
     id: f59c3fb696f2
     last_write_checksum: sha1:4a031b76315f66c3d414a7dd5f34ae1b5c239b2e
-    pristine_git_object: 2ab32f9783f6645bba7603279c03db4465c70fff
+    pristine_git_object: e0143a39fb12a4a3efce3e1b250730d20cf21c7d
   docs/models/transcriptionstreamtextdelta.md:
     id: 69a13554b554
     last_write_checksum: sha1:de31f5585d671f85e6a9b8f04938cf71000ae3f7
-    pristine_git_object: adddfe187546c0161260cf06953efb197bf25693
+    pristine_git_object: a4062171d7630bcea967a89d8df6cffd4908285f
   docs/models/unarchiveftmodelout.md:
     id: 4f2a771b328a
-    last_write_checksum: sha1:b3be8add91bbe10704ff674891f2e6377b34b539
-    pristine_git_object: 287c9a007e0b2113738a1884450133558d23540e
-  docs/models/unarchiveftmodeloutobject.md:
-    id: 5fa9545c3df0
-    last_write_checksum: sha1:29c0a228082142925a0fd72fef5a578f06ac764d
-    pristine_git_object: 623dcec24e2c676c9d50d3a3547b1dd9ffd78038
+    last_write_checksum: sha1:0b9ab5d6c7c1285712127cfac9e918525303a441
+    pristine_git_object: 12c3d74534897129766397a44afee0f4dac91d9f
+  docs/models/unarchivemodelrequest.md:
+    id: e6922871c93a
+    last_write_checksum: sha1:591461141df5089e884a2db13bfaaef1def0748c
+    pristine_git_object: 033dad8a66969e2b920ec40391c38daa658c6f0e
+  docs/models/updateagentrequest.md:
+    id: 371bfedd9f89
+    last_write_checksum: sha1:f9ebaa4650f77595fd554bb2711d4b869cba06cc
+    pristine_git_object: 358cb71d2ab7dfae85ac7768936910a976d2f644
+  docs/models/updateagentversionrequest.md:
+    id: 706f66fb34eb
+    last_write_checksum: sha1:913a8105b77620d32147a00c1223ce5a117d2df2
+    pristine_git_object: b83eb867a518d757b23d981c962f87a0e9c8a454
+  docs/models/updatedocumentrequest.md:
+    id: ee4e094a6aa7
+    last_write_checksum: sha1:4798ef091b5d045b0cda3d2a3cc40aef0fb3155c
+    pristine_git_object: fa5d117a4016208d81ad53f24daa4284b35152f8
   docs/models/updateftmodelin.md:
     id: 1b98d220f114
     last_write_checksum: sha1:d1c7a8f5b32228d8e93ad4455fccda51b802f08f
     pristine_git_object: 4e55b1a7d96e1ad5c1e65c6f54484b24cd05fcfc
+  docs/models/updatelibraryrequest.md:
+    id: 2eda82f12f31
+    last_write_checksum: sha1:cc1ca5b6f9bd4ab61e3983991f5656ff5ea22e8d
+    pristine_git_object: e03883cca75f3ed17fa3432e0abc2c892ec3d74a
+  docs/models/updatemodelrequest.md:
+    id: 8eabdced3e0e
+    last_write_checksum: sha1:28765fe537adb34e5e2ef051cd1226bdcae8ea9f
+    pristine_git_object: 5799c63babcd9377c5024f584328c814c4401c04
+  docs/models/updatemodelresponse.md:
+    id: 742d796d5be3
+    last_write_checksum: sha1:2e09ab747fa3247486b25057e887baf0859c3a5b
+    pristine_git_object: 275ee77f111b926d681a446af9741001a1c88fa8
+  docs/models/updateorcreatelibraryaccessrequest.md:
+    id: c95e6b3df38f
+    last_write_checksum: sha1:f957324978f18d9831dafe4d1a5d78f755f51ed6
+    pristine_git_object: e04567b40d62e0d705096eedaba9fa84913f584d
+  docs/models/uploaddocumentrequest.md:
+    id: a211b5f814e4
+    last_write_checksum: sha1:ce851cd52da0250c8d86f1346778edb0b5c97a50
+    pristine_git_object: 92152b7f247ae4d7f8373e8b13ce947b7ca2cae7
   docs/models/uploadfileout.md:
     id: c991d0bfc54c
     last_write_checksum: sha1:ce5af8ffadb8443a6d1ca5fbbc014de42da35b9d
@@ -1517,7 +1567,7 @@ trackedFiles:
   docs/models/usermessage.md:
     id: ed66d7a0f80b
     last_write_checksum: sha1:627f88dbb89e226a7d92564658c23a0e8d71342a
-    pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546
+    pristine_git_object: 78ed066eed9f0638edc6db697eaeaad6f32b4770
   docs/models/usermessagecontent.md:
     id: 52c072c851e8
     last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a
@@ -1533,69 +1583,75 @@ trackedFiles:
   docs/models/wandbintegration.md:
     id: ba1f7fe1b1a3
     last_write_checksum: sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf
-    pristine_git_object: 199d2eddc61069c80b628a12bff359ac2abc7338
+    pristine_git_object: c73952d9e79ea8e08bc1c17817e74e3650def956
   docs/models/wandbintegrationout.md:
     id: c1a0f85273d8
     last_write_checksum: sha1:ce7ffc6cc34931b4f6d2b051ff63e1ca39e13882
-    pristine_git_object: cec02ed87555128e6027e00f3385a61028694ac0
+    pristine_git_object: a6f65667a6bcfb18b78f8f766ab71de84ca13ca7
   docs/models/websearchpremiumtool.md:
     id: 267988aa8c3f
     last_write_checksum: sha1:f9b761d727cbe0c60a2d0800b0a93929c5c3f5e7
-    pristine_git_object: 941fc2b8448d4caeae9318fdf08053a2b59a9bee
+    pristine_git_object: 07b8b9265e01bd28b1c30fbc3f1283285e7d6edd
   docs/models/websearchtool.md:
     id: fc4df52fb9b5
     last_write_checksum: sha1:047fd9f950d5a86cf42a8f3ac40f754b395e39ec
-    pristine_git_object: c8d708bdcdbfc387a09683bdd47ebabedd566cb0
+    pristine_git_object: da5e7b7b600fa3fd0799e95e7a0f9507cd8456c3
   docs/sdks/accesses/README.md:
     id: 2ea167c2eff2
-    last_write_checksum: sha1:22bd7a11d44295c2f433955604d3578292f26c99
-    pristine_git_object: 64a1e749aeb6f2c32497a72a649ecc2b7549c077
+    last_write_checksum: sha1:200d509484a1a27fec893e15c39043a9deb140da
+    pristine_git_object: c1e3866d1a37e1596fa61538317eb68907cbaf57
   docs/sdks/agents/README.md:
     id: 5965d8232fd8
     last_write_checksum: sha1:a655952f426d5459fa958fa5551507e4fb3f29a8
-    pristine_git_object: 75efc492c4114417c22a796824ee971e9180104e
+    pristine_git_object: cd3ec4c6c87f34c4d3634bf510534dff163d97de
   docs/sdks/batchjobs/README.md:
-    last_write_checksum: sha1:212bc82280a58f896172d173e5be516b926bc11c
+    id: a3b8043c6336
+    last_write_checksum: sha1:eca07f3c47acbe42264d31fba982a49005a8c983
+    pristine_git_object: 24316d78b1be51649d186db1479bbf74f00f87e6
   docs/sdks/betaagents/README.md:
-    last_write_checksum: sha1:131f220aefaff8a3ca912df661199be7a88d50ca
+    id: 5df79b1612d8
+    last_write_checksum: sha1:f2dbb543e7bd1db239ee801c55fa1f7f92ca6322
+    pristine_git_object: 0ef655a348d7381aa0a7869a022b362d90497197
   docs/sdks/chat/README.md:
     id: 393193527c2c
     last_write_checksum: sha1:908e67969e8f17bbcbe3697de4233d9e1dd81a65
-    pristine_git_object: 89c4fffbb777427723307b13c124668601ff5839
+    pristine_git_object: 6907c29d26b51fa7748b339cc73fd3d6d11a95a5
   docs/sdks/classifiers/README.md:
     id: 74eb09b8d620
     last_write_checksum: sha1:f9cc75dbb32ea9780a9d7340e524b7f16dc18070
-    pristine_git_object: 634ee419f3334ba50dd25f0e2340c32db1ec40b3
+    pristine_git_object: 41b520812ac8a6031c0ab32aa771e9903fa24a97
   docs/sdks/conversations/README.md:
     id: e22a9d2c5424
-    last_write_checksum: sha1:f55def6eaab9fcbed0e86a4dee60e5c2656f0805
-    pristine_git_object: acd43cdb63edd23665e808aaccc6ab3a4dc3dc85
+    last_write_checksum: sha1:55b150757576819887075feac484ba76ae8abd59
+    pristine_git_object: c0089f12b040f3686a584f1569ed4e0ab56c52fb
   docs/sdks/documents/README.md:
     id: 9758e88a0a9d
-    last_write_checksum: sha1:d9bcb4bf6c2189c282844f81b456fb29654e384c
-    pristine_git_object: d90e7ee7aab234cb992a904088cbbf2e57dd0baa
+    last_write_checksum: sha1:55280d8863200affd25a98d7493a0110c14baad3
+    pristine_git_object: 97831f86223c6dbbaec35a240725a8c72e229961
   docs/sdks/embeddings/README.md:
     id: 15b5b04486c1
     last_write_checksum: sha1:46e57c7808ce9c24dd54c3562379d2ff3e0526e8
     pristine_git_object: 0be7ea6dcace678d12d7e7e4f8e88daf7570df5d
   docs/sdks/files/README.md:
     id: e576d7a117f0
-    last_write_checksum: sha1:22298532be84a02d4fc8a524d6baa4fab0adcec4
-    pristine_git_object: 44c39f8a3bd783b5c592e4f22c453bd76cef434a
+    last_write_checksum: sha1:92558cd6688432150cc433391e2b77a328fa3939
+    pristine_git_object: ae29b7bf9383f534b2ca194ec5ff261ff17b5fb6
   docs/sdks/fim/README.md:
     id: 499b227bf6ca
     last_write_checksum: sha1:34ff7167b0597bf668ef75ede016cb8884372d1b
     pristine_git_object: 3c8c59c79db12c916577d6c064ddb16a511513fd
   docs/sdks/finetuningjobs/README.md:
-    last_write_checksum: sha1:58b5ecea679eab1691f0002c7d3323170d73357b
+    id: 03d609f6ebdd
+    last_write_checksum: sha1:206624c621a25836333f4c439e0247beb24a7492
+    pristine_git_object: fe18feeb640804d9308e6fefe9b5f2371d125f9b
   docs/sdks/libraries/README.md:
     id: df9a982905a3
-    last_write_checksum: sha1:0c710c0395906333b85bedd516cfca7dcb3b9b42
-    pristine_git_object: bbdacf0538c6c055fef0c0109aac163e987a3dd5
+    last_write_checksum: sha1:1c623647aa7b834a844e343c9e3fe0763c8445a5
+    pristine_git_object: 8835d0ec8cbabcb8ab47b39df982a775342c3986
   docs/sdks/models/README.md:
     id: b35bdf4bc7ed
-    last_write_checksum: sha1:37ac4b52ddcdbe548d478aed5fd95091a38b4e42
-    pristine_git_object: 6fa28ca2e25c0b2f3fbf044b706d19f01193fc3c
+    last_write_checksum: sha1:2410579fd554ad1e5734cc313d0a75eeb04a1d14
+    pristine_git_object: 0cbf1bdde52d1a52c1329ecd1116718237be5152
   docs/sdks/ocr/README.md:
     id: 545e35d2613e
     last_write_checksum: sha1:a8d22a86b79a0166ecec26a3e9379fa110d49b73
@@ -1614,1242 +1670,1252 @@ trackedFiles:
     pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48
   src/mistralai/client/__init__.py:
     id: f1b791f9d2a5
-    last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b
-    pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c
+    last_write_checksum: sha1:fcca936cb62cc76d57372d5bd5735877b79b53a4
+    pristine_git_object: 481fc91604c413966c8510d8341edaa3355fc276
   src/mistralai/client/_hooks/__init__.py:
     id: cef9ff97efd7
-    last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d
-    pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1
+    last_write_checksum: sha1:9a6f060871150610f890cc97676c3afe9050b523
+    pristine_git_object: 66a04e3727ffcc2c427d854cdbb4f5f340af050f
   src/mistralai/client/_hooks/sdkhooks.py:
     id: ed1e485b2153
-    last_write_checksum: sha1:5688b56bf910f5f176bcacc58f4ad440ac2fa169
-    pristine_git_object: c9318db481df2293b37e9b964da417ee5de86911
+    last_write_checksum: sha1:e592d5ab277827b988257b4df3e746508ca91b23
+    pristine_git_object: ecf94240a5689c8b248add46509bc7a7982d8437
   src/mistralai/client/_hooks/types.py:
     id: 85cfedfb7582
-    last_write_checksum: sha1:ea20450ab595abb6ad744ecbd58927e8fa1ce520
-    pristine_git_object: e7e1bb7f61527de6095357e4f2ab11e342a4af87
+    last_write_checksum: sha1:40294e852f818a974034c33e510e0f8723fcaf31
+    pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c
   src/mistralai/client/_version.py:
     id: cc807b30de19
-    last_write_checksum: sha1:c808e81ad8b454d646101b878105d109d74ba6ad
-    pristine_git_object: 8c5d6e54860c69881bf976887910fc32d183c6e5
+    last_write_checksum: sha1:dd6d1521f7ecfc56be58eafc1709873a04d27fb0
+    pristine_git_object: 814d9ec74a37ae50f106ea07b3c174e65685521b
   src/mistralai/client/accesses.py:
     id: 76fc53bfcf59
-    last_write_checksum: sha1:da6c930bfec52d4cc344408f0aaef2874705fa68
-    pristine_git_object: 307c7156626e735c802c149ea3547648ea03da09
+    last_write_checksum: sha1:16574ca54176ec30b236ab1a4694f57a6314db43
+    pristine_git_object: cda484c8feade66829dad587f5f397aa89d4fb6f
   src/mistralai/client/agents.py:
     id: e946546e3eaa
-    last_write_checksum: sha1:0ff47f41f9224c1ef6c15b5793c04a7be64f074b
-    pristine_git_object: c04abd21b5b7cb9b8ddfdb52ec67fffa7d21759a
+    last_write_checksum: sha1:3b46ac68d37563a9eb988ad2978083e40cf4513d
+    pristine_git_object: 0942cb20173f0b2e3f828f5857e3aa221f65bc1b
   src/mistralai/client/audio.py:
     id: 7a8ed2e90d61
-    last_write_checksum: sha1:941d0466d9ff5d07c30a6e41cf4434857518963a
-    pristine_git_object: 2834ade22ab137b7620bfd4318fba4bdd9ef087f
+    last_write_checksum: sha1:e202d775d24c0303053e0548af83fcb04e2748f4
+    pristine_git_object: f68f063c08a099d07904456daa76d8e2d2ecdbe6
   src/mistralai/client/basesdk.py:
     id: 7518c67b81ea
-    last_write_checksum: sha1:c10ba4619b8c70ff876304a93b432d4466cb7112
-    pristine_git_object: bddc9012f28f7881b75a720a07a3ad60845e472e
+    last_write_checksum: sha1:795253524d0911d227b934978bdacb84619177a3
+    pristine_git_object: 611b40597b42ac309871681b38a3b3c249cbe494
   src/mistralai/client/batch.py:
     id: cffe114c7ac7
-    last_write_checksum: sha1:ed3cc7aee50879eca660845e51bb34912505d56a
-    pristine_git_object: d53a45fbcbeb7b1d8fb29c373101c9e2a586b877
+    last_write_checksum: sha1:b452983f67b33f26e1faa60fdbbb171cb1877224
+    pristine_git_object: 7e36fd0d73ebeb873f74f4109896a6cf3bb7d2ba
   src/mistralai/client/batch_jobs.py:
-    last_write_checksum: sha1:0ac09a2fcbf9f059cea8197b0961cd78603e9c9c
+    id: 3423fec25840
+    last_write_checksum: sha1:eb1baade19f5da3dd815ebfbabccca139eb7b25d
+    pristine_git_object: 752c76524a4fa19ed1654943218ca5182d563ca3
   src/mistralai/client/beta.py:
     id: 981417f45147
-    last_write_checksum: sha1:538571fbb2b393c64b1e7f53d1e530d989717eb3
-    pristine_git_object: b30003eae52be5e79838fe994cda8474068a43dc
+    last_write_checksum: sha1:85f42fc6c2318eef94c90405b985120220c9c617
+    pristine_git_object: 65b761d18f7274cc33162a83efa5b33211f78952
   src/mistralai/client/beta_agents.py:
-    last_write_checksum: sha1:295438e65ce0453cbb97988fb58d01263d88b635
+    id: b64ad29b7174
+    last_write_checksum: sha1:227c2ef3812c06e4a813063bf9d2282ce0884ecd
+    pristine_git_object: 4e692f17579635d5f0cc03f86b8158b3344ae87f
   src/mistralai/client/chat.py:
     id: 7eba0f088d47
-    last_write_checksum: sha1:00d1ec46a2c964b39dae5f02e4d8adf23e5dcc21
-    pristine_git_object: 6fa210bb01b193e1bd034431923a3d4dc8c8a16c
+    last_write_checksum: sha1:6f052ac3117829b16906a4e1cbfa5b1f7ab104fd
+    pristine_git_object: 35698d32ac870f4b59c03f02700f20c04b14462d
   src/mistralai/client/classifiers.py:
     id: 26e773725732
-    last_write_checksum: sha1:3a65b39ad26b6d1c988d1e08b7b06e88da21bb76
-    pristine_git_object: 537e2438afcb570a3e436ab4dd8b7d604b35b627
+    last_write_checksum: sha1:abd5033ee390fdeddfa4af918cc44f6210a2a6a0
+    pristine_git_object: 3407c4b77db429535465f29754a2da8145d6a5fe
   src/mistralai/client/conversations.py:
     id: 40692a878064
-    last_write_checksum: sha1:d6b44a85ecf623d0257296d62b05f26742a2a2aa
-    pristine_git_object: 285beddbd175fee210b697d4714c28196c1fa7a2
+    last_write_checksum: sha1:6e81283d3d5db5dd554af68d69313951cf5f4578
+    pristine_git_object: 646b91f3980bbe9be01078162d5b4ad9afb141b9
   src/mistralai/client/documents.py:
     id: bcc17286c31c
-    last_write_checksum: sha1:eb3d1d86cbc2e7e72176ff60370a9ad1d616e730
-    pristine_git_object: 009a604f1c2fa367d14df7fb9f4078083c4be501
+    last_write_checksum: sha1:9ae89ef80a636b55ba4cdc3ad6c77c47c1824433
+    pristine_git_object: c78f2944edaac77864ff6c4dd8d19d3aab3f0cb6
   src/mistralai/client/embeddings.py:
     id: f9c17258207e
-    last_write_checksum: sha1:a3fa049388bf794ed764a1a8b6736f6c29136c83
-    pristine_git_object: 359f2f621d1628536b89f66115726064db34e51b
+    last_write_checksum: sha1:7cd6d848ed8978637988d9b7e1a7dd92dac5eb3b
+    pristine_git_object: 4a056baa014217927412e9dd60479c28de899e2e
   src/mistralai/client/files.py:
     id: f12df4b2ce43
-    last_write_checksum: sha1:577d731e40683b309a4848d8534185e738e54d31
-    pristine_git_object: 97817eab1a4b0a0649a128b06a9f3ff4077dffa5
+    last_write_checksum: sha1:aa647afa486bbed48083c0b1ec954bdc5cfd0280
+    pristine_git_object: 57d389f1e245f5768fe9e8991f65229dd4bd608d
   src/mistralai/client/fim.py:
     id: 217bea5d701d
-    last_write_checksum: sha1:d62f3bee1322a41aefc0cc01aa8313e8b7e3ae1b
-    pristine_git_object: 4a834fe93a9b9a8af30f681c9541a7cef0a513e1
+    last_write_checksum: sha1:90cacb025a1a1fb81e619d59819c0a652f4a5efa
+    pristine_git_object: be3f7742b866ac58b7bbb65e3593e9865dee134f
   src/mistralai/client/fine_tuning.py:
     id: 5d5079bbd54e
-    last_write_checksum: sha1:e420e8df4b265b95696085585b1b213b9d05dee4
-    pristine_git_object: c57425fdf3225eaeccee47a17db198a3974995a3
+    last_write_checksum: sha1:fe1f774df4436cc9c2e54ed01a48db573eb813cd
+    pristine_git_object: df6bc5643a13294ddfbeecc6ae84d00cd7199bed
   src/mistralai/client/fine_tuning_jobs.py:
-    last_write_checksum: sha1:4dc213f6b47379bd76c97c8fc62a4dc23acbb86e
+    id: fa1ea246e0b2
+    last_write_checksum: sha1:edfe25f99047d4cbd45222cd23823c782286a2c8
+    pristine_git_object: 9a28ded152a4f4a5b625a97e087aebc5a287d71e
   src/mistralai/client/httpclient.py:
     id: 3e46bde74327
-    last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7
-    pristine_git_object: 89560b566073785535643e694c112bedbd3db13d
+    last_write_checksum: sha1:0f4ecc805be1dc3d6e0ca090f0feb7d988f6eb9d
+    pristine_git_object: 544af7f87d6b7097935290bebd08e30e5f485672
   src/mistralai/client/libraries.py:
     id: d43a5f78045f
-    last_write_checksum: sha1:5264a24b973f49b4ea7252868f4a76baba9093b4
-    pristine_git_object: 03a547410e042c19329ea9a91eef1bf25ecdcbe1
+    last_write_checksum: sha1:b3fd0348f4f56aab9873d09c45ed9575baf6e7c3
+    pristine_git_object: 26ceabe19a340b7fd4dbb74aebab62bc45093ae5
   src/mistralai/client/models/__init__.py:
     id: e0e8dad92725
-    last_write_checksum: sha1:1b4b7b007a50570b4592f6121d6fa5556cecae4b
-    pristine_git_object: 23e652220f29a882748661a8c0d21aa2830471bf
+    last_write_checksum: sha1:d047eab2a2a8ee5af65ed19055a0a3e3092ad2c5
+    pristine_git_object: 093ffcbdb0b57458cf856f585e6637d7d5955e8d
   src/mistralai/client/models/agent.py:
     id: 1336849c84fb
-    last_write_checksum: sha1:39fca92a9cb4fea59a01b6ce883b1c17395978f8
-    pristine_git_object: 3bedb3a3a71c116f5ccb0294bc9f3ce6690e47b2
+    last_write_checksum: sha1:d41a96558ddbd52b6c71d316c291847bb6131a01
+    pristine_git_object: 05ae24cde5149e30004b7cd4a2409c753682be56
   src/mistralai/client/models/agentaliasresponse.py:
     id: 3899a98a55dd
-    last_write_checksum: sha1:6dfa55d4b61a543382fab8e3a6e6d824feb5cfc7
-    pristine_git_object: 4bc8225c0217f741328d52ef7df38f7a9c77af21
+    last_write_checksum: sha1:d7e12ea05431361ad0219f5c8dee11273cd60397
+    pristine_git_object: 6972af2a4ae846e63d2c70b733ecd6c8370ee0cd
   src/mistralai/client/models/agentconversation.py:
     id: 1b7d73eddf51
-    last_write_checksum: sha1:2624deece37e8819cb0f60bbacbbf1922aa2c99c
-    pristine_git_object: 5dfa8c3137c59be90c655ba8cf8afb8a3966c93a
+    last_write_checksum: sha1:bc2f1a3710efc9c87d6796ccce953c9ce9cf3826
+    pristine_git_object: a850d54c64de0c84ad4ea2b11ea1a828eb2580c4
   src/mistralai/client/models/agentcreationrequest.py:
     id: 35b7f4933b3e
-    last_write_checksum: sha1:99456f8e6d8848f2cebbd96040eefbce73c9c316
-    pristine_git_object: 61a5aff554f830ab9057ce9ceafc2ce78380290f
+    last_write_checksum: sha1:d3f61940b4cccfc9c13860844f4115e60b095823
+    pristine_git_object: 898d42a9c16ffe893792e14445e9ebfcbd046ba3
   src/mistralai/client/models/agenthandoffdoneevent.py:
     id: 82628bb5fcea
-    last_write_checksum: sha1:151a49e8a7f110123fd0a41e723dfdb6055e9a8e
-    pristine_git_object: c826aa5e1f2324cddb740b3ffc05095ff26c666d
+    last_write_checksum: sha1:537e9f651de951057023d3712fa1820da17a21b4
+    pristine_git_object: 40bf84970e1d245c3c7fbad64d73f648f8287438
   src/mistralai/client/models/agenthandoffentry.py:
     id: 5030bcaa3a07
-    last_write_checksum: sha1:86622620c14e2aacbdcc47b9772a3b9bb4127018
-    pristine_git_object: 0b0de13f8840e9ab221ea233040ca03241cba8b7
+    last_write_checksum: sha1:afe800c64c74aa79fceda4e4ce808f67573edbc7
+    pristine_git_object: b18fe17c70d561b926bdac04124ebca8fc1cca0b
   src/mistralai/client/models/agenthandoffstartedevent.py:
     id: 2f6093d9b222
-    last_write_checksum: sha1:ba4e40a4791bad20a4ac7568e32e34f6f00cfe24
-    pristine_git_object: 4b8ff1e5e3639fb94b55c0a417e9478d5a4252b2
-  src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py:
-    id: 23a832f8f175
-    last_write_checksum: sha1:9ca9a0be2db68005fc0dec3f24d24fccf8d0c631
-    pristine_git_object: 33da325cadf36ce8162bac11f1576872bcbbdbd6
-  src/mistralai/client/models/agents_api_v1_agents_deleteop.py:
-    id: 95adb6768908
-    last_write_checksum: sha1:9118fb084668440cec39ddd47b613fb4cd796c8d
-    pristine_git_object: 58fe902f0a51b50db869dfa760f1a3a4cba36342
-  src/mistralai/client/models/agents_api_v1_agents_get_versionop.py:
-    id: ef9914284afb
-    last_write_checksum: sha1:d9b429cd8ea7d20050c0bc2077eec0084ed916b6
-    pristine_git_object: edcccda19d5c3e784a227c6356285ee48be3d7f2
-  src/mistralai/client/models/agents_api_v1_agents_getop.py:
-    id: f5918c34f1c7
-    last_write_checksum: sha1:efdd7bed8ae19047b48c16c73099d433725181ab
-    pristine_git_object: d4817457a33d49ddaa09e8d41f3b03b69e8e491e
-  src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py:
-    id: a04815e6c798
-    last_write_checksum: sha1:7bd6ba32e2aeeee4c34f02bab1d460eb384f9229
-    pristine_git_object: b9770fffe5be41579f12d76f41a049e8b41b3ef8
-  src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py:
-    id: 19e3310c3907
-    last_write_checksum: sha1:62b3b94ad3ed412f74cfc75572a91b7f3cd6b39b
-    pristine_git_object: 813335f9e972c976f0e887d1f26be3c224b36b0c
-  src/mistralai/client/models/agents_api_v1_agents_listop.py:
-    id: 25a6460a6e19
-    last_write_checksum: sha1:586ad2257e4a2c70bdb6d0044afe7d1b20f23d93
-    pristine_git_object: 119f51236dda0769ab3dc41a9dbbb11b5d5e935d
-  src/mistralai/client/models/agents_api_v1_agents_update_versionop.py:
-    id: 63f61b8891bf
-    last_write_checksum: sha1:b214f6850347e4c98930ef6f019fdad52668c8c0
-    pristine_git_object: 116f952b2ba2a7dca47975a339267c85122cd29a
-  src/mistralai/client/models/agents_api_v1_agents_updateop.py:
-    id: bb55993c932d
-    last_write_checksum: sha1:28cd6d0b729745b2e16d91a5e005d59a6d3be124
-    pristine_git_object: 116acaa741f79123e682db0be2adbb98cf8283d8
-  src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py:
-    id: ec00e0905f15
-    last_write_checksum: sha1:67967a775c3a1ec139ccd6991465ea15327e3ba7
-    pristine_git_object: 9f00ffd4b484f03dae6e670d019f61a4392afc85
-  src/mistralai/client/models/agents_api_v1_conversations_appendop.py:
-    id: 39c6125e850c
-    last_write_checksum: sha1:93621c5ea8fbc5c038c92596b7d4c0aef0a01e2f
-    pristine_git_object: 13d07ba91207f82dcea8f58c238cc743cd6c3964
-  src/mistralai/client/models/agents_api_v1_conversations_deleteop.py:
-    id: 0792e6abbdcb
-    last_write_checksum: sha1:dc60f272fed790bec27c654da0fb185aab27ff82
-    pristine_git_object: 81066f90302d79bc2083d1e31aa13656c27cc65f
-  src/mistralai/client/models/agents_api_v1_conversations_getop.py:
-    id: c530f2fc64d0
-    last_write_checksum: sha1:89088ac683d6830ffd4f649c25ccfb60a4b094de
-    pristine_git_object: c919f99e38148fb9b2d51816d0dd231ee828b11d
-  src/mistralai/client/models/agents_api_v1_conversations_historyop.py:
-    id: 2f5ca33768aa
-    last_write_checksum: sha1:9f33f183cd07b823b4727662ea305c74853049c5
-    pristine_git_object: ba1f8890c1083947e4d6882dff2b50b3987be738
-  src/mistralai/client/models/agents_api_v1_conversations_listop.py:
-    id: 936e36181d36
-    last_write_checksum: sha1:e528bf06983dd0b22a0b0bc1d470b344e85db434
-    pristine_git_object: bb3c7127c4b43019405689dc2ae10f5933c763bc
-  src/mistralai/client/models/agents_api_v1_conversations_messagesop.py:
-    id: b5141764a708
-    last_write_checksum: sha1:0be49e2ad8a3edb079ce4b1f092654c7a6b7e309
-    pristine_git_object: e05728f2c2c0a350bdaf72fe9dc488c923230ab7
-  src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py:
-    id: c284a1711148
-    last_write_checksum: sha1:ef22ebf2e217ab41ce0b69cf388122ee18ad7b05
-    pristine_git_object: 9b489ab46486cc37349d64a4fc685f1355afb79a
-  src/mistralai/client/models/agents_api_v1_conversations_restartop.py:
-    id: 3ba234e5a8fc
-    last_write_checksum: sha1:e7e22098d8b31f5cc5cb0e8fafebe515842c2f88
-    pristine_git_object: 8bce3ce519a69a6d1cb36383b22fb801768c4868
+    last_write_checksum: sha1:933f8be5eacd86881a42cfb83612f327caa77ee7
+    pristine_git_object: e278aef39d3bc5e158a094c593391fa8ad77c320
   src/mistralai/client/models/agentscompletionrequest.py:
     id: 3960bc4c545f
-    last_write_checksum: sha1:d22d3513e2b391127df2202ca50b1fb9de605103
-    pristine_git_object: 22368e44adb1b3ecff58d2b92592710335a062b9
+    last_write_checksum: sha1:ee1e60d894d3a9277c1a3970c422483ffa502e21
+    pristine_git_object: f4a2d646927c8c0f250507f52c5e7515830759ad
   src/mistralai/client/models/agentscompletionstreamrequest.py:
     id: 1b73f90befc2
-    last_write_checksum: sha1:02fd1cf62fc203635099ad60fb9b41e82a82e0f8
-    pristine_git_object: 37d46c79d8964d799679413e14122a5146799eb6
+    last_write_checksum: sha1:3bc4976eeda6d9b30bba72e7f7c417ca9ba885c5
+    pristine_git_object: 732e2402190d40bc5360868d3048d57fff9e7b55
   src/mistralai/client/models/agentupdaterequest.py:
     id: 2d5a3a437819
-    last_write_checksum: sha1:65fdf42d54199ad3b951089bdea26deca0134440
-    pristine_git_object: 261ac069ce4e2b630d39080edf47bf2ad510ffb4
+    last_write_checksum: sha1:4a0ef549756904749a36b580cc2296a6a54d6647
+    pristine_git_object: 96e209d41b638002f129ec4c13748082ccc3a8db
   src/mistralai/client/models/apiendpoint.py:
     id: 00b34ce0a24d
-    last_write_checksum: sha1:0a1a08e7faaa7be804de952248b4f715c942af9a
-    pristine_git_object: a6072d568e08ab1f5e010d5924794adfb2188920
+    last_write_checksum: sha1:733e852bf75956acd2c72a23443627abfa090b7b
+    pristine_git_object: a6665c1076f05c28936510c24ee7d3498d7e7a24
+  src/mistralai/client/models/appendconversationop.py:
+    id: 1c47dd1e7c7e
+    last_write_checksum: sha1:109ced509e3caa5e5c9610b3a18839d113be708a
+    pristine_git_object: 710b8e1ca3fbfbb747e48d7699588bc199a41274
+  src/mistralai/client/models/appendconversationstreamop.py:
+    id: 1ab08b189e9d
+    last_write_checksum: sha1:edd2a91da29f83646538b57e4d29f006d31f9dec
+    pristine_git_object: 55efca0e32c5d54d100563500aee9b61952d07c2
   src/mistralai/client/models/archiveftmodelout.py:
     id: bab499599d30
-    last_write_checksum: sha1:352eb0aca8368d29ef1b68820540363e8fa69be4
-    pristine_git_object: 6108c7e153abecfc85be93b6fa1f9f22480f6d9b
+    last_write_checksum: sha1:92f5b5a89ae5e52523d30069629e0ac8dc858d6b
+    pristine_git_object: 3107116c4a2c78c591999b220349325612a19b4e
+  src/mistralai/client/models/archivemodelop.py:
+    id: beefa1df3b7c
+    last_write_checksum: sha1:6f78b2f84f42267d4928a5a3ad1d3d3cae417cac
+    pristine_git_object: 30b4a9bd71f349cc4ab4b12df73770d327008527
   src/mistralai/client/models/assistantmessage.py:
     id: 2b49546e0742
-    last_write_checksum: sha1:235a0f8d14b3100f5c498a9784ddda1f824a77a9
-    pristine_git_object: 3ba14ce78e01c92458477bb025b9e5ded074fd4d
+    last_write_checksum: sha1:a58ecb7bc381af02d83247f0518a3d34013b4575
+    pristine_git_object: 5a4a2085e838196d3ab2b1c00bbeb7a78516dfb2
   src/mistralai/client/models/audiochunk.py:
     id: ce5dce4dced2
-    last_write_checksum: sha1:5b7ef3c96f0d8b240d1a7354379dbebd911604c3
-    pristine_git_object: 80d836f27ae65f30c6ca0e1d4d5d585bbf498cfd
+    last_write_checksum: sha1:8218d4c7118f677f16a3a63c55348c40d3ab3156
+    pristine_git_object: a51868279b9b4ce2d97990286512d69f8d7f2e82
   src/mistralai/client/models/audioencoding.py:
     id: b14e6a50f730
-    last_write_checksum: sha1:8c8d6c1da3958200bf774313c485189426439545
-    pristine_git_object: 557f53ed7a90f05e5c457f8b217d3df07e113e0b
+    last_write_checksum: sha1:92ca06dce513cd39b2c7d9e5848cf426b40598ce
+    pristine_git_object: 67fec75d72845b8dda774c96767a0b233f269fe5
   src/mistralai/client/models/audioformat.py:
     id: c8655712c218
-    last_write_checksum: sha1:baef21b264f77117bbaa1336d7efefae916b9119
-    pristine_git_object: 7ea10b3ad610aa1500fd25500ff942988ea0e1db
+    last_write_checksum: sha1:8ee7b564d106b601b6ad8a9321c27dfff421ce5e
+    pristine_git_object: fef87ae76b31128ebd5ced4278e274c249181c23
   src/mistralai/client/models/audiotranscriptionrequest.py:
     id: e4148b4d23e7
-    last_write_checksum: sha1:52c245a739864ca838d4c4ef4bdf74e7b0c60f2e
-    pristine_git_object: 78a3797882841a6fd1251d72756f6b75f6d01006
+    last_write_checksum: sha1:6d7b267bc241c1f72b5b7839d6e2ad76a4c1ecff
+    pristine_git_object: 8c47a83cada33d8dbd4a9ffdedb55d3f4f55dadf
   src/mistralai/client/models/audiotranscriptionrequeststream.py:
     id: 33a07317a3b3
-    last_write_checksum: sha1:e468052c9ab8681ff0e1121e61aff406fc4427fc
-    pristine_git_object: 350643614e23002bc55e99e2d1807bedd80a0613
+    last_write_checksum: sha1:66ae6146b9b75600df720054ec2c21e4e996b1fe
+    pristine_git_object: a080cee24c0d67c38fc6785c440418580e097700
   src/mistralai/client/models/basemodelcard.py:
     id: 556ebdc33276
-    last_write_checksum: sha1:6ebd9dd362ad23d34eb35451af01897662854726
-    pristine_git_object: 8ce7f139b6018c4a7358a21534532cd3e741fa8a
+    last_write_checksum: sha1:94871ce94c92fbbcff9fa5d6a543c824b17ee13b
+    pristine_git_object: 17a3e5c93339082f408f9ab5f34b5a01e24c74e0
   src/mistralai/client/models/batcherror.py:
     id: 1563e2a576ec
-    last_write_checksum: sha1:239f9c44477941c45a3e7fe863828299d36267d6
-    pristine_git_object: a9c8362bfa08ab4727f08a6dd2b44a71040560f7
+    last_write_checksum: sha1:9b59325428680d58151696c1738ad5466d67a78d
+    pristine_git_object: c1bf722a45c4326f24f7fd34ea536d59c48c67f2
   src/mistralai/client/models/batchjobin.py:
     id: 72b25c2038d4
-    last_write_checksum: sha1:0064f199b6f27b5101f6a9abf0532f61c522e2c8
-    pristine_git_object: 39cf70b5bdf8db8adaa5c9d1dd8a227b2365879b
+    last_write_checksum: sha1:667020377b2ca85dfd9c5aed96c7d4ba2571093b
+    pristine_git_object: a0c3b9146130a5ebfdbd0ec6338092bacc01bf85
   src/mistralai/client/models/batchjobout.py:
     id: cbf1d872a46e
-    last_write_checksum: sha1:44a92b4f427b77db29294a3b6d375f8622660ee1
-    pristine_git_object: 008d43b4340cf8853fac751fb6f15525f765fe39
+    last_write_checksum: sha1:9031bc5ff1986ddc283551f7f5d210c9de67cc56
+    pristine_git_object: 99c2b95118364d660f0cefde16507a83e8c9cafe
   src/mistralai/client/models/batchjobsout.py:
     id: 20b2516e7efa
-    last_write_checksum: sha1:7d4223363e861137b9bce0dc78460c732a63c90b
-    pristine_git_object: 2654dac04c126a933f6d045f43f16a30263750dc
+    last_write_checksum: sha1:426287f6ef9ed88e75f9e318582627d066f9e4f0
+    pristine_git_object: f65fc040a964c68c82b5df7d3fb9e40222182322
   src/mistralai/client/models/batchjobstatus.py:
     id: 61e08cf5eea9
-    last_write_checksum: sha1:f90059b4aaead197100965c648114254e7dc4888
-    pristine_git_object: 4b28059ba71b394d91f32dba3ba538a73c9af7a5
+    last_write_checksum: sha1:78934183519948464385245cbc89efb68ac00bfb
+    pristine_git_object: bd77faa2fbed74b19a8d3884af6d43bc1b4806e0
   src/mistralai/client/models/batchrequest.py:
     id: 6f36819eeb46
-    last_write_checksum: sha1:0ce0e6982c96933e73a31c6ebfb29f78b6ebf13b
-    pristine_git_object: 24f50a9af9a74f6bec7e8903a966d114966a36d3
+    last_write_checksum: sha1:115df324d1fec164bae60bf4b37acfa5149b3172
+    pristine_git_object: 41c4523456398b302e0b7eb35824efc014f03aa6
   src/mistralai/client/models/builtinconnectors.py:
     id: 2d276ce938dc
-    last_write_checksum: sha1:50d2b60942ca1d7c9868ce59bf01ed860c09f313
-    pristine_git_object: 6a3b2476d54096722eb3e7a271629d108028bd35
+    last_write_checksum: sha1:4ceb3182009b6535c07d652ccf46661b553b6272
+    pristine_git_object: ecf60d3c1a83028d9cf755d4c9d5459f6b56e72a
+  src/mistralai/client/models/cancelbatchjobop.py:
+    id: cebac10b56a9
+    last_write_checksum: sha1:2614180488e51c0e701fffdb058b39892c5bc1e5
+    pristine_git_object: cd94ee86467247fe2bc7f7381fa05b57bedabef1
+  src/mistralai/client/models/cancelfinetuningjobop.py:
+    id: c9a1b39f0d02
+    last_write_checksum: sha1:139d3c443678aeeb8afedea8b2a783210e5ac28c
+    pristine_git_object: ddd445bb433df9a0f987693d97088d79e5e8c47f
   src/mistralai/client/models/chatclassificationrequest.py:
     id: afd9cdc71834
-    last_write_checksum: sha1:84cc02714fe8ae408a526ab68c143b9b51ea5279
-    pristine_git_object: 450810225bb43bbd1539768e291840a210489f0f
+    last_write_checksum: sha1:91f62e46c415a0168442695f61cb30756227ed1a
+    pristine_git_object: 8b6d07b906c688a3849b8a4576cc10e075a6868f
   src/mistralai/client/models/chatcompletionchoice.py:
     id: 7e6a512f6a04
-    last_write_checksum: sha1:dee3be3b6950e355b14cce5be6c34bd5d03ba325
-    pristine_git_object: 5d888cfd73b82097d647f2f5ecdbdf8beee2e098
+    last_write_checksum: sha1:de0281a258140f081012b303e3c14e0b42acdf63
+    pristine_git_object: 2c515f6e9a290ebab43bae41e07493e4b99afe8f
   src/mistralai/client/models/chatcompletionrequest.py:
     id: 9979805d8c38
-    last_write_checksum: sha1:6442737fd5552e01ad78ab4cf8bc10e0d9c75d05
-    pristine_git_object: 30fce28d5e071797a7180753f2825d39cfeac362
+    last_write_checksum: sha1:95c0879e52d8b6c1ff389a5dfe1776129c764c00
+    pristine_git_object: 4f7d071b5a0b84ef27397b4acaf4a798b6178eb8
   src/mistralai/client/models/chatcompletionresponse.py:
     id: 669d996b8e82
-    last_write_checksum: sha1:af8071e660b09437a32482cdb25fd07096edc080
-    pristine_git_object: 60a1f561ff29c3bc28ee6aea69b60b9d47c51471
+    last_write_checksum: sha1:97f164fea881127ac82303e637b6a270e200ac5b
+    pristine_git_object: 7092bbc18425091d111ec998b33edc009ff0931b
   src/mistralai/client/models/chatcompletionstreamrequest.py:
     id: 18cb2b2415d4
-    last_write_checksum: sha1:512f4c05b140757888db465e2bb30a0abcafb1d4
-    pristine_git_object: 21dad38bb83e9b334850645ffa24e1099b121f6c
+    last_write_checksum: sha1:12e794c89a954702c3d4dccddad9b365331bd996
+    pristine_git_object: ec7d2ae131cf5fac7eb618bbe09340ac23d444ef
   src/mistralai/client/models/chatmoderationrequest.py:
     id: 057aecb07275
-    last_write_checksum: sha1:6c24f39ddd835278773bd72cb2676e8f1fd10e73
-    pristine_git_object: 631c914d1a4f4453024665eb0a8233ec7a070332
+    last_write_checksum: sha1:e18a5ae518f5413b1bff45f85f823b60e00ef32a
+    pristine_git_object: a8d021e8deb2015470765340281789a7fba544aa
   src/mistralai/client/models/checkpointout.py:
     id: 3866fe32cd7c
-    last_write_checksum: sha1:c2b57fe880c75290b100904c26afaadd356fbe88
-    pristine_git_object: 89189ed19dc521bc862da0aec1997bba0854def7
+    last_write_checksum: sha1:5ed4988914acef48854337127c4ca51791de3ab9
+    pristine_git_object: 3e8d90e920cd34ff611f5e875c0163e1a4087f6f
   src/mistralai/client/models/classificationrequest.py:
     id: 6942fe3de24a
-    last_write_checksum: sha1:3b99dba1f7383defed1254fba60433808184e8e7
-    pristine_git_object: c724ff534f60022599f34db09b517f853ae7968d
+    last_write_checksum: sha1:c98f6751aeba813b968aaf69c3551972b94da4c8
+    pristine_git_object: 903706c31176da4c2ab021b3bcaeb2217ca98f76
   src/mistralai/client/models/classificationresponse.py:
     id: eaf279db1109
-    last_write_checksum: sha1:0e09986f5db869df04601cec3793552d17e7ed04
-    pristine_git_object: 4bc21a58f0fb5b5f29357f2729250030b7d961bc
+    last_write_checksum: sha1:64522aa2b0970e86a0133348411592f95163f374
+    pristine_git_object: d2f09f430c4bca39ea9e5423b7d604ea4016fc70
   src/mistralai/client/models/classificationtargetresult.py:
     id: 2445f12b2a57
-    last_write_checksum: sha1:9325f4db4e098c3bf7e24cfc487788e272a5896f
-    pristine_git_object: 89a137c374efc0f8b3ee49f3434f264705f69639
+    last_write_checksum: sha1:2b8b9aeadee3b8ffe21efd1e0c842f9094c4ecc7
+    pristine_git_object: 6c7d6231d211977332100112900ea0f8cdf5d84c
   src/mistralai/client/models/classifierdetailedjobout.py:
     id: d8daeb39ef9f
-    last_write_checksum: sha1:7e6df794c49d75785fac3bf01ea467a2dcbd224b
-    pristine_git_object: 1de4534fcb12440a004e94bc0eced7483952581d
+    last_write_checksum: sha1:1b6dde6554e51d9100f2e50779eff56b3ca07603
+    pristine_git_object: bc5c5381d61b6b4945b51dc9836bcc2e7aa66f9f
   src/mistralai/client/models/classifierftmodelout.py:
     id: 2903a7123b06
-    last_write_checksum: sha1:78bfdfa3b9188c44fe4cd9cf18bce9e1d1a4cd48
-    pristine_git_object: a4572108674ea9c209b6224597878d5e824af686
+    last_write_checksum: sha1:5141a0c29da0739057c52b2345a386c79d6f8f85
+    pristine_git_object: 182f4954c2b3f1408cb05eee76e2bf24005b023e
   src/mistralai/client/models/classifierjobout.py:
     id: e19e9c4416cc
-    last_write_checksum: sha1:7384ea39ff4c341e8d84c3a4af664298b31c1440
-    pristine_git_object: ab1e261d573a30714042af3f20ed439ddbf1d819
+    last_write_checksum: sha1:c5daf7e879911ea24fba847a1c12ab9774ebbe98
+    pristine_git_object: 03a5b11c46097733d609f3b075b58ef729f230a5
   src/mistralai/client/models/classifiertargetin.py:
     id: ed021de1c06c
-    last_write_checksum: sha1:cd1c0b8425c752815825abaedab8f4e2589cbc8f
-    pristine_git_object: 231ee21e61f8df491057767eac1450c60e8c706a
+    last_write_checksum: sha1:8a1db343861e4f193a56d4030862c1f3a361d3e1
+    pristine_git_object: b250109bd03976c93c571dbbacb1c631acd19717
   src/mistralai/client/models/classifiertargetout.py:
     id: 5131f55abefe
-    last_write_checksum: sha1:4d9f66e3739f99ff1ea6f3468fe029d664541d58
-    pristine_git_object: 957104a7bcc880d84ddefe39e58969b20f36d24c
+    last_write_checksum: sha1:304408da049ff4ad17f058267ffaa916ef907dc2
+    pristine_git_object: 3d41a4d9c887488e7b08cc9d5d8dcb5b0fd26781
   src/mistralai/client/models/classifiertrainingparameters.py:
     id: 4000b05e3b8d
-    last_write_checksum: sha1:a9d4eecd716bd078065531198f5a57b189caeb79
-    pristine_git_object: 60f53c374ece9a5d336e8ab20c05c2d2c2d931f9
+    last_write_checksum: sha1:4063f78ea65f138578bef4ce8908b04e556cc013
+    pristine_git_object: f360eda504f0aa3f60ba6834aab59c1beb648151
   src/mistralai/client/models/classifiertrainingparametersin.py:
     id: 4b33d5cf0345
-    last_write_checksum: sha1:f50e68c14be4655d5cf80f6c98366d32bbd01869
-    pristine_git_object: e24c9ddecf60c38e146b8f94ad35be95b3ea2609
+    last_write_checksum: sha1:7764e6e6c5fc58e501c0891d036bbb22a8ddcb07
+    pristine_git_object: 85360a7e7ba5212ef9052d3bd5f368ea4e2c4d98
   src/mistralai/client/models/codeinterpretertool.py:
     id: 950cd8f4ad49
-    last_write_checksum: sha1:9b720eaf4d7243e503e14350f457babbca9cf7af
-    pristine_git_object: faf5b0b78f2d9981bb02eee0c28bba1fdba795b9
+    last_write_checksum: sha1:b014008db6ddce4b35aedec70783d74ce1b5cf83
+    pristine_git_object: f69c7a5777af16df151589d2c5c8d81de4d28638
   src/mistralai/client/models/completionargs.py:
     id: 3db008bcddca
-    last_write_checksum: sha1:4b4f444b06a286098ce4e5018ffef74b3abf5b91
-    pristine_git_object: 010910f6f00a85b706a185ca5770fe70cc998905
+    last_write_checksum: sha1:4c4ba2d39540bbb06fc1c49815fc6a7c8cf40ab2
+    pristine_git_object: 918832acf3ea3d324c20e809fcdb1eae2ba3d7fd
   src/mistralai/client/models/completionargsstop.py:
     id: 5f339214501d
-    last_write_checksum: sha1:99912f7a10e92419308cf3c112c36f023de3fc11
-    pristine_git_object: de7a09564540daaa6819f06195c347b3e01914f7
+    last_write_checksum: sha1:744878976d33423327ea257defeff62073dad920
+    pristine_git_object: 39c858e66380044e11d3c7fd705334d130f39dea
   src/mistralai/client/models/completionchunk.py:
     id: d786b44926f4
-    last_write_checksum: sha1:e38d856ffefd3b72ff7034fa030ca0071caa0996
-    pristine_git_object: 9790db6fe35e0043f3240c0f7e8172d36dee96f5
+    last_write_checksum: sha1:04b634cffa4b0eb8ca177c91d62d333a061160df
+    pristine_git_object: 67f447d0c6cd97cb54ffcd0c620654629ac4e848
   src/mistralai/client/models/completiondetailedjobout.py:
     id: 9bc38dcfbddf
-    last_write_checksum: sha1:0b0f7114471e650b877de2e149b69e772d29905f
-    pristine_git_object: 85c0c803cf809338900b7b8dcde774d731b67f8f
+    last_write_checksum: sha1:4771444753ff456829249d4e5fa5f71f2328fa78
+    pristine_git_object: cd3a86ee28cdbf3a670d08f27642294321849ec0
   src/mistralai/client/models/completionevent.py:
     id: c68817e7e190
-    last_write_checksum: sha1:c29f7e8a5b357e15606a01ad23e21341292b9c5e
-    pristine_git_object: 52db911eeb62ec7906b396d6936e3c7a0908bb76
+    last_write_checksum: sha1:dc43ac751e4e9d9006b548e4374a5ec44729eea4
+    pristine_git_object: 3b90ab0c1ecac12f90e0ae3946a6b61410247e4f
   src/mistralai/client/models/completionftmodelout.py:
     id: 0f5277833b3e
-    last_write_checksum: sha1:6ae50b3172f358796cfeb154c7e59f9cdde39e61
-    pristine_git_object: ccecbb6a59f2994051708e66bce7ece3598a786f
+    last_write_checksum: sha1:1c83e1d0a868eef32792844d787c5aaede0386b8
+    pristine_git_object: 7ecbf54aabf022392e6d2ce2d0a354b9326eec79
   src/mistralai/client/models/completionjobout.py:
     id: 712e6c524f9a
-    last_write_checksum: sha1:4f66641e3d765df1db88554b4399eded4625e08d
-    pristine_git_object: ecd95bb9c93412b222659e6f369d3ff7e13c8bb2
+    last_write_checksum: sha1:2c8500593b8f9257a0a389f87792cd174fcd7209
+    pristine_git_object: 42e5f6c65809aaaa02f0bf58fbf031f4c476208b
   src/mistralai/client/models/completionresponsestreamchoice.py:
     id: 5969a6bc07f3
-    last_write_checksum: sha1:aa04c99a8bca998752b44fc3e2f2d5e24434a9bf
-    pristine_git_object: 1b8d6faccbe917aaf751b4efa676bf51c1dcd3ff
+    last_write_checksum: sha1:874d3553d4010a8b83484588dcbf9136bd8c6537
+    pristine_git_object: 119a9690727ae296acf72dcfafdd224a61582599
   src/mistralai/client/models/completiontrainingparameters.py:
     id: be202ea0d5a6
-    last_write_checksum: sha1:fa4a0f44afeb3994c9273c5b4c9203eef810b957
-    pristine_git_object: 36b285ab4f41209c71687a14c8650c0db52e165f
+    last_write_checksum: sha1:fd9a12417cd4f7bdc1e70ba05bbfef23b411ddd0
+    pristine_git_object: 4b846b1b9bbcc4f2c13306169b715f08241e8f1c
   src/mistralai/client/models/completiontrainingparametersin.py:
     id: 0df22b873b5f
-    last_write_checksum: sha1:109503fabafd24174c671f2caa0566af2d46800e
-    pristine_git_object: d0315d9984575cb6c02bc6e38cedde3deef77b9a
+    last_write_checksum: sha1:a92e9df1d5be2a7f2d34b1dcde131e99e5ee351d
+    pristine_git_object: 20b74ad9fc0c50fe7d1d3dd97fcd3c296fbf7042
   src/mistralai/client/models/contentchunk.py:
     id: c007f5ee0325
-    last_write_checksum: sha1:a319b67206f4d0132544607482e685b46e2dce8c
-    pristine_git_object: 0a25423f9f9a95ced75d817ad7712747ce0915ae
+    last_write_checksum: sha1:5cedb52346bc34cb30950496d34ab87d591b6110
+    pristine_git_object: eff4b8c670f47f53785690415751be05284f3d8b
   src/mistralai/client/models/conversationappendrequest.py:
     id: 81ce529e0865
-    last_write_checksum: sha1:4f38d4aa2b792b113ef34ce54df3ac9b2efca5e1
-    pristine_git_object: 867c0a414c1340033af7f6d03ea8cef2dcb8ff4a
+    last_write_checksum: sha1:83e883e4324d76d74521607390747ecdf7dffaa0
+    pristine_git_object: 0f07475e4ca640ce50a6214fe59a91041a2e596a
   src/mistralai/client/models/conversationappendstreamrequest.py:
     id: 27ada745e6ad
-    last_write_checksum: sha1:41dcb9467d562bcc8feb885a56f73ac8d013c2d8
-    pristine_git_object: f51407bf2a363f705b0b61ed7be4ef6249525af5
+    last_write_checksum: sha1:12c3c63b763bd16398fcbec7d6fab41729ee81a6
+    pristine_git_object: a0d46f727ff99d76a1bf26891df3b0ed80a88375
   src/mistralai/client/models/conversationevents.py:
     id: 8c8b08d853f6
-    last_write_checksum: sha1:4d7e8087fa9a074ed2747131c3753e723ba03e0b
-    pristine_git_object: 308588a1f094631935e4229f5538c5092f435d2c
+    last_write_checksum: sha1:6362a88ae26cb67f7abc3d2b0963f9a869c15371
+    pristine_git_object: f24760381501f822593ef5903df0d32ca3cf9b47
   src/mistralai/client/models/conversationhistory.py:
     id: 60a51ff1682b
-    last_write_checksum: sha1:637f7302571f51bcb5d65c51e6b6e377e8895b96
-    pristine_git_object: 40bd1e7220160f54b0ab938b3627c77fb4d4f9ef
+    last_write_checksum: sha1:6fa8bdd370239df879da7b687c037405a8fbbe25
+    pristine_git_object: 92d6cbf90c9c76945ee79752d5b4232aea10a79d
   src/mistralai/client/models/conversationinputs.py:
     id: 711b769f2c40
-    last_write_checksum: sha1:3e8c4650808b8059c3a0e9b1db60136ba35942df
-    pristine_git_object: 4d30cd76d14358e12c3d30c22e3c95078ecde4bd
+    last_write_checksum: sha1:5fc688af61d6a49ede9c9709069f3db79f4dc615
+    pristine_git_object: 7ce3ffc3772926a259d714b13bfc4ee4e518f8f7
   src/mistralai/client/models/conversationmessages.py:
     id: 011c39501c26
-    last_write_checksum: sha1:f71e85febab797d5c17b58ef8a1318545c974ed2
-    pristine_git_object: 1ea05369b95fdaa7d7ae75398669f88826e5bb26
+    last_write_checksum: sha1:408e26cb45dc1bdf88b1864d365e636307920df3
+    pristine_git_object: 1aa294a497d2eb27a12dcbcce36c7956f6ee4f4e
   src/mistralai/client/models/conversationrequest.py:
     id: 58e3ae67f149
-    last_write_checksum: sha1:0e3cdc7cb34cc8c7f646cc7c2869349747cfd47e
-    pristine_git_object: e3211c4c7b20c162473e619fad6dc0c6cea6b571
+    last_write_checksum: sha1:f1b0b2b6a9c9b94ed5e3a77fb0b92e695f421a2e
+    pristine_git_object: 2005be82d8ebcf8c8fa74074abf25f072e795582
   src/mistralai/client/models/conversationresponse.py:
     id: ad7a8472c7bf
-    last_write_checksum: sha1:ae6b273f3b1d1aff149d269a19c99d495fdf263e
-    pristine_git_object: 32d0f28f101f51a3ca79e4d57f4913b1c420b189
+    last_write_checksum: sha1:8b625fe8808f239d6bc16ecf90ae1b7f42262c0c
+    pristine_git_object: 24598ef3fc24a61a0f15ab012aa211ba57cd0dcf
   src/mistralai/client/models/conversationrestartrequest.py:
     id: 681d90d50514
-    last_write_checksum: sha1:76c5393b280e263a38119d98bdcac917afe36881
-    pristine_git_object: aa2bf7b0dcdf5e343a47787c4acd00fe3f8bd405
+    last_write_checksum: sha1:0ce81536464db32422165c35252770f3197fb38e
+    pristine_git_object: 35d3099361274440552e14934b6a1b19ebc8f195
   src/mistralai/client/models/conversationrestartstreamrequest.py:
     id: 521c2b5bfb2b
-    last_write_checksum: sha1:5ba78bf9048b1e954c45242f1843eb310b306a94
-    pristine_git_object: 689815ebcfe577a1698938c9ccbf100b5d7995f8
+    last_write_checksum: sha1:b996f57271f0c521113913f48b31d54c17d73769
+    pristine_git_object: 0ddfb130d662d954c3daabdf063172b8ea18a153
   src/mistralai/client/models/conversationstreamrequest.py:
     id: 58d633507527
-    last_write_checksum: sha1:d4cda0957f6d09ed991e3570b6e8ef81d3cf62af
-    pristine_git_object: 219230a2a8dd7d42cc7f5613ca22cec5fa872750
+    last_write_checksum: sha1:fc4f2f1578fbeb959ddbe681dee2d11f0a4e6c5e
+    pristine_git_object: 379a8f2859b5f40cc744ad8f9bc6c39a198258b5
   src/mistralai/client/models/conversationusageinfo.py:
     id: 6685e3b50b50
-    last_write_checksum: sha1:7fa37776d7f7da6b3a7874c6f398d6f607c01b52
-    pristine_git_object: 7a818c89a102fe88eebc8fec78a0e195e26cf85d
-  src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py:
-    id: 767aba526e43
-    last_write_checksum: sha1:9a8f9917fc5de154e8a6fdb44a1dd7db55bb1de5
-    pristine_git_object: 1cd36128a231a6d4be328fde53d1f048ff7c2ccd
+    last_write_checksum: sha1:60f91812b9b574b3fade418cc7c2191253f6abbf
+    pristine_git_object: 98db0f1617bd7484750652997dcd43d08ef7c5fc
+ src/mistralai/client/models/createfinetuningjobop.py: + id: fd3c305df250 + last_write_checksum: sha1:e29ada8f733de44bfeab2885d2221ade84b34619 + pristine_git_object: f55deef5d9f6134fddb02c458a0d812759cea358 + src/mistralai/client/models/createorupdateagentaliasop.py: + id: a79cf28bda01 + last_write_checksum: sha1:d4f2790b5970c9cf30b3fcee9d8bc6d4b8c33778 + pristine_git_object: cde1dd054c447a8617527585e783a95affba3277 + src/mistralai/client/models/deleteagentaliasop.py: + id: e4d0d7f75b24 + last_write_checksum: sha1:66e34ba7fb1a238d55c7ed380bd666c8975c01b4 + pristine_git_object: c52d099e9c1f28bf37ee009833b5fb8e351ed987 + src/mistralai/client/models/deleteagentop.py: + id: 089fb7f87aea + last_write_checksum: sha1:a196bcc758e36ffeb17fab25bb60451d3d66a4d8 + pristine_git_object: 8b14bca7bf5d67e16181b67ef6b7375c1b0a93fd + src/mistralai/client/models/deleteconversationop.py: + id: 86fefc353db0 + last_write_checksum: sha1:48f33b614ec087fdaf2b29d9c3eefd6e8d7d311f + pristine_git_object: 39607f40640c6dfa3ef20d913a90abee602b9b4a + src/mistralai/client/models/deletedocumentop.py: + id: 62522db1ccf2 + last_write_checksum: sha1:1a4e2e72a0d3cd24e184ce3cc5037f5ec7cdd9a5 + pristine_git_object: 400070a49bc046d8132bfc7dfe3e114faa719001 + src/mistralai/client/models/deletefileop.py: + id: 286b4e583638 + last_write_checksum: sha1:2561c1fe03ec3915dfa48fa354a86a56ba9b54c4 + pristine_git_object: 4feb7812f8acfa366e4b46fc914925df4f705528 src/mistralai/client/models/deletefileout.py: id: 5578701e7327 - last_write_checksum: sha1:76d209f8b3bba5e4bc984700fe3d8981c9d6142b - pristine_git_object: b25538bee35dedaae221ea064defb576339402c8 + last_write_checksum: sha1:a34520be2271c1e37fa8b3c1bdead843db7b1bb9 + pristine_git_object: c721f32cfe752c2c084efb72db3e5409795e387a + src/mistralai/client/models/deletelibraryaccessop.py: + id: df80945bcf19 + last_write_checksum: sha1:065aad372e0bbfd998fe3adc3389e3dbc9d5b674 + pristine_git_object: ca14c3ffc43be3aee14d6aa1f4805f0483d8b676 + src/mistralai/client/models/deletelibraryop.py: + id: cd0ce9bf8d51 + last_write_checksum: sha1:07840cbdb741bba291f1db1a1b54daca99e8f7ea + pristine_git_object: 5eb6fc310aa62454e3f7ed0766212c807125fe8c + src/mistralai/client/models/deletemodelop.py: + id: 2c494d99a44d + last_write_checksum: sha1:97dce35d527e03612068896572824cc0f13269c1 + pristine_git_object: 55c4b2422336ef6e148eedbd4a6a60846d187e9b src/mistralai/client/models/deletemodelout.py: id: ef6a1671c739 - last_write_checksum: sha1:ef2f6774eaf33c1c78368cd92bc4108ecccd9a6c - pristine_git_object: 5aa8b68fe3680d3b51127d6a6b6068b1303756e8 + last_write_checksum: sha1:4606683ef6da0aae7e88bc50144eddc83908f9d7 + pristine_git_object: bf22ed177ee91dce98bfd9b04f02e683c79e4860 src/mistralai/client/models/deltamessage.py: id: 68f53d67a140 - last_write_checksum: sha1:db65faf32a4abc2396eb1f694d3245fcc4173e2f - pristine_git_object: 0ae56da86f645e5a0db2a0aa4579342610243300 + last_write_checksum: sha1:ff7fa85086bd56863f7f4a255b008cfaa11a959c + pristine_git_object: fbb8231a310e90afd50951dd0f572ce3e0f029e6 src/mistralai/client/models/documentlibrarytool.py: id: 3eb3c218f457 - last_write_checksum: sha1:3f3dafea3df855f1fccaa6ece64df55b40b2d4f7 - pristine_git_object: 861a58d38125ca5af11772ebde39a7c57c39ad9c + last_write_checksum: sha1:e5bfb61a4a03a3b28837c27195f1bcd8cc14c6b2 + pristine_git_object: ff0f739391404604c1cc592c23507946aa0b693f src/mistralai/client/models/documentout.py: id: 7a85b9dca506 - last_write_checksum: sha1:2de0e0f9be3a2362fbd7a49ff664b43e4c29a262 - pristine_git_object: 
39d0aa2a5a77d3eb3349ae5e7b02271c2584fe56 + last_write_checksum: sha1:f041a4866c67d1f81f62282918d625216a760355 + pristine_git_object: 3b1a5713c84512947a07d153792b17fcf3262dcb src/mistralai/client/models/documenttextcontent.py: id: e730005e44cb - last_write_checksum: sha1:ad7e836b5f885d703fd5f09c09aba0628d77e05b - pristine_git_object: b1c1aa073dff4dcdc59d070058221b67ce9e36f9 + last_write_checksum: sha1:c86f4b15e8fda1cd5c173da01462342cd22b7286 + pristine_git_object: b6904cb4267347b62a457a01b91a391500326da9 src/mistralai/client/models/documentupdatein.py: id: d19c1b26a875 - last_write_checksum: sha1:bad1cee0906961f555784e03c23f345194959077 - pristine_git_object: 02022b89ef2b87349e0d1dc4cccc3d1908a2d1aa + last_write_checksum: sha1:bddd412de340d050cfbdd4206a9fbb3d1660a045 + pristine_git_object: 669554de5d33f6163c8d08fefee52c1869662eba src/mistralai/client/models/documenturlchunk.py: id: 4309807f6048 - last_write_checksum: sha1:1253bdbe1233481622b76e340413ffb1d8996f0e - pristine_git_object: 00eb55357f19ac4534446e0ee761bdbccfb471e2 + last_write_checksum: sha1:186a684da48bb5d237769ecb3dbf1479a5c5ee55 + pristine_git_object: 304cde2b687e71b0d2fb0aee9b20826473375b25 + src/mistralai/client/models/downloadfileop.py: + id: 4d051f08057d + last_write_checksum: sha1:b80c5332cfdb043bb56f686e4e1c4bf26495b04b + pristine_git_object: fcdc01d644bdce8d1fc7896b5f8244a7a5311dfa src/mistralai/client/models/embeddingdtype.py: id: 77f9526a78df - last_write_checksum: sha1:962f629fa4ee8a36e731d33f8f730d5741a9e772 - pristine_git_object: 26eee779e12ae8114a90d3f18f99f3dd50e46b9e + last_write_checksum: sha1:a4e2ce6d00e6d1db287a5d9f4254b0947227f337 + pristine_git_object: 732c4ebe3678563ebcdbafd519f93317261586fb src/mistralai/client/models/embeddingrequest.py: id: eadbe3f9040c - last_write_checksum: sha1:c4f85f5b768afb0e01c9a9519b58286804cfbd6b - pristine_git_object: 1dfe97c8fa2162719d2a68e7a0ef2f348efa1f88 + last_write_checksum: sha1:6071612944c4c603803cc7f2adc1e9784549c70f + pristine_git_object: f4537ffa9bdc0a9a73101e1b1524fed1a09c1a65 src/mistralai/client/models/embeddingresponse.py: id: f7d790e84b65 - last_write_checksum: sha1:285531abf3a45de3193ed3c8b07818faac97eb32 - pristine_git_object: 64a28ea9f1c57ed6e69e1d49c5c83f63fa38fd36 + last_write_checksum: sha1:9bb53a5a860c8e10d4d504648d84da73068c0a83 + pristine_git_object: 6ffd68941f32f396998df9dded14ff8365926608 src/mistralai/client/models/embeddingresponsedata.py: id: 6d6ead6f3803 - last_write_checksum: sha1:ed821591832ebfa03acd0ce0a3ca5a0521e6fa53 - pristine_git_object: ebd0bf7b29e0a1aee442337fd02ce562fb2c5a3d + last_write_checksum: sha1:3e2430e6bd9b3c77a564f4e56edec1274446a1f4 + pristine_git_object: a689b290d5a4b360e409413c96bb5e7288ce2e2e src/mistralai/client/models/encodingformat.py: id: b51ec296cc92 - last_write_checksum: sha1:f9a3568cd008edb02f475a860e5849d9a40d0246 - pristine_git_object: be6c1a14e4680e24e70b8bbda018759056b784ca + last_write_checksum: sha1:ea907f86b00323d99df37f7ff45d582aace798e7 + pristine_git_object: 4a39d0295f9069ae9f749cf21dab450eaf145d19 src/mistralai/client/models/entitytype.py: id: 62d6a6a13288 - last_write_checksum: sha1:baefd3e820f1682bbd75ab195d1a47ccb3d16a19 - pristine_git_object: 9c16f4a1c0e61f8ffaee790de181572891db3f89 + last_write_checksum: sha1:015e2db9e8e5a3e4ce58442ccedaf86c66239dde + pristine_git_object: 56d82cbed237f32a8b00cfee4042dfe3e7053bcb src/mistralai/client/models/eventout.py: id: da8ad645a9cb - last_write_checksum: sha1:326b575403d313c1739077ad6eb9047ded15a6f5 - pristine_git_object: 5e118d4599e935bcd6196a7cbc1baae8f4a82752 + 
last_write_checksum: sha1:67f7cc29102a971d33b6cbbcb06ffcfe595227a5 + pristine_git_object: a0247555bb816061cb22f882406c11c3a9011818 src/mistralai/client/models/file.py: id: f972c39edfcf - last_write_checksum: sha1:40ddf9b7e6d3e9a77899cd9d32a9ac921c531c87 - pristine_git_object: a8bbc6fab46a49e7171cabbef143a9bbb48e763c + last_write_checksum: sha1:8d0adce8f4dfc676f6da6465547a0d187d4326f1 + pristine_git_object: dbbc00b50e5578230daefa47648954ead8ed8eb9 src/mistralai/client/models/filechunk.py: id: ff3c2d33ab1e - last_write_checksum: sha1:9ae8d68bfcb6695cce828af08e1c9a9ce779f1f3 - pristine_git_object: d8b96f69285ea967397813ae53722ca38e8d6443 + last_write_checksum: sha1:9f970ef8366df8087f9332a4b1986540063a1949 + pristine_git_object: 43ef22f861e0a275c7348133d0c4d04551477646 src/mistralai/client/models/filepurpose.py: id: a11e7f9f2d45 - last_write_checksum: sha1:154a721dbd5e0c951757a596a96e5d880ecf4982 - pristine_git_object: eef1b08999956fd45fe23f2c03bb24546207b4e3 - src/mistralai/client/models/files_api_routes_delete_fileop.py: - id: 2f385cc6138f - last_write_checksum: sha1:e7b7ad30a08b1033ecd5433da694f69a91029bfc - pristine_git_object: b71748669906990998cc79345f789ed50865e110 - src/mistralai/client/models/files_api_routes_download_fileop.py: - id: 8184ee3577c3 - last_write_checksum: sha1:7781932cc271d47a2965217184e1dd35a187de3f - pristine_git_object: fa9e491a95625dbedde33bc9ea344aaebf992902 - src/mistralai/client/models/files_api_routes_get_signed_urlop.py: - id: 0a1a18c6431e - last_write_checksum: sha1:797201cde755cf8e349b71dc2ff7ce56d1eabb73 - pristine_git_object: a05f826232396957a3f65cb1c38c2ae13944d43b - src/mistralai/client/models/files_api_routes_list_filesop.py: - id: b2e92f2a29b4 - last_write_checksum: sha1:711cc470b8dedefd2c2c7e2ae7dfa6c4601e0f30 - pristine_git_object: ace996318446667b2da3ca2d37bd2b25bcfbb7a7 - src/mistralai/client/models/files_api_routes_retrieve_fileop.py: - id: 5d5dbb8d5f7a - last_write_checksum: sha1:ea34337ee17bdb99ad89c0c6742fb80cb0b67c13 - pristine_git_object: 4a9678e5aa7405cbe09f59ffbdb6c7927396f06a - src/mistralai/client/models/files_api_routes_upload_fileop.py: - id: f13b84de6fa7 - last_write_checksum: sha1:2ca94437630dddc55c6dd624d715963b19b97a73 - pristine_git_object: 723c6cc264613b3670ac999829e66131b8424849 + last_write_checksum: sha1:8b167c02f9f33e32d5fd1c6de894693924f4d940 + pristine_git_object: 49a5568ff82ad4a85e15c8de911e8d6c98dcd396 src/mistralai/client/models/fileschema.py: id: 19cde41ca32a - last_write_checksum: sha1:29fe7d4321fc2b20ae5fa349f30492aeb155c329 - pristine_git_object: 9ecde454f0dac17997ef75e5cdb850cccc8020fe + last_write_checksum: sha1:245115d1f955324bce2eeb3220bdaa6906b28e92 + pristine_git_object: cbe9b0d17ad15ce02e9fd973fe49666885c6ff92 src/mistralai/client/models/filesignedurl.py: id: a1754c725163 - last_write_checksum: sha1:0987cc364694efd61c62ba15a57cfb74aa0d0cc8 - pristine_git_object: cbca9847568ab7871d05b6bb416f230d3c9cddfc + last_write_checksum: sha1:5d981b1743aa2d84818597b41a5f357b4256e9e0 + pristine_git_object: 53dff812ffe5c5859794424d49f8bd7f735cf3b0 src/mistralai/client/models/fimcompletionrequest.py: id: cf3558adc3ab - last_write_checksum: sha1:a62845c9f60c8d4df4bfaa12e4edbb39dcc5dcb7 - pristine_git_object: c9eca0af3ccacfd815bfb8b11768e289b4828f4e + last_write_checksum: sha1:db51cde0b13bb373097f2c158b665ccb3c5789f4 + pristine_git_object: e2f6032784c996d18c100b8b2cde4bb4432af884 src/mistralai/client/models/fimcompletionresponse.py: id: b860d2ba771e - last_write_checksum: sha1:00b5b7146932f412f8230da7164e5157d267a817 - 
pristine_git_object: 8a2eda0ced48f382b79e5c6d7b64b0c5f0b16c15 + last_write_checksum: sha1:dffd5a7005999340f57eaa94e17b2c82ddc7fd90 + pristine_git_object: 1345a116b7855ab4b824cf0369c0a5281e44ea97 src/mistralai/client/models/fimcompletionstreamrequest.py: id: 1d1ee09f1913 - last_write_checksum: sha1:9260ae9a12c37b23d7dfa8ec6d3029d1d8a133ed - pristine_git_object: 2954380238dec5540e321012b8aa6609e404114c + last_write_checksum: sha1:df973050b942b844280bf98f0a3abc90bd144bbb + pristine_git_object: 480ed17ab006e7afa321a91c5ccebd6380f8f60c src/mistralai/client/models/finetuneablemodeltype.py: id: 05e097395df3 - last_write_checksum: sha1:8694f016e8a4758308225b92b57bee162accf9d7 - pristine_git_object: f5b8b2ed45b56d25b387da44c398ae79f3a52c73 + last_write_checksum: sha1:daf4cd1869da582981023dea1074268da071e16a + pristine_git_object: 7b924bd7abc596f0607a513eee30e98cbf7ab57a src/mistralai/client/models/ftclassifierlossfunction.py: id: d21e2a36ab1f - last_write_checksum: sha1:9554b17b3139b54975aae989fb27e1c369bee4cd - pristine_git_object: c4ef66e0fe69edace4912f2708f69a6e606c0654 + last_write_checksum: sha1:ca90e2f1cd0b9054293bea304be0867c93f7fac2 + pristine_git_object: ccb0f21b5a69f91119bec9db6e9f3d876e4c35af src/mistralai/client/models/ftmodelcapabilitiesout.py: id: f70517be97d4 - last_write_checksum: sha1:44260fefae93bc44a099ff64eeae7657c489005c - pristine_git_object: be31aa3c14fb8fe9154ad8f54e9bf43f586951c7 + last_write_checksum: sha1:2bc7700ad89b7aab37fa02fcb6d9282bc252315e + pristine_git_object: 42269b785d9d5ad2257179f2c093c62637fb5dd6 src/mistralai/client/models/ftmodelcard.py: id: c4f15eed2ca2 - last_write_checksum: sha1:ab559da7dd290e4d2be5c6a3398732de887b2a74 - pristine_git_object: 36cb723df8bcde355e19a55105932298a8e2e33a + last_write_checksum: sha1:7441e4155beaa97cea47b6295017f567dd6eee1a + pristine_git_object: 570e95e2276b144e008e9ccf6a108faa1fc835f5 src/mistralai/client/models/function.py: id: 32275a9d8fee - last_write_checksum: sha1:f98db69c2fb49bbd6cff36fb4a25e348db6cd660 - pristine_git_object: 6e2b52edbd8d7cb6f7654eb76b7ca920636349cf + last_write_checksum: sha1:356a2c6c9d2437e60036a9b3d1a3d154302363c8 + pristine_git_object: 3632c1afb40aebab0795f754814036e04c251469 src/mistralai/client/models/functioncall.py: id: 393fca552632 - last_write_checksum: sha1:ef22d048ddb5390f370fcf3405f4d46fa82ed574 - pristine_git_object: 6cb6f26e6c69bc134bcb45f53156e15e362b8a63 + last_write_checksum: sha1:6e96e9abaa9b7625a9a30e376c31b596ee9defcb + pristine_git_object: 527c3ad408e1e1ccfe6301a8860e7f751e1d312d src/mistralai/client/models/functioncallentry.py: id: cd058446c0aa - last_write_checksum: sha1:661372b1ff4505cf7039ece11f12bb1866688bed - pristine_git_object: fce4d387df89a9fa484b0c7cc57556ea13278469 + last_write_checksum: sha1:6ece3816c50bd04b908743ad62e2dc71d815842a + pristine_git_object: 6ada1d358641a23bc83b93f222eeff659a124b34 src/mistralai/client/models/functioncallentryarguments.py: id: 3df3767a7b93 - last_write_checksum: sha1:6beb9aca5bfc2719f357f47a5627c9edccef051f - pristine_git_object: ac9e6227647b28bfd135c35bd32ca792d8dd414b + last_write_checksum: sha1:9858feba8f7f01017f10477a77dec851a1d06e55 + pristine_git_object: afe81b24e131a8ef879ee7f140271aa762b8ed2f src/mistralai/client/models/functioncallevent.py: id: 23b120b8f122 - last_write_checksum: sha1:c0226ca734320b628223f5c5206477b224dff15e - pristine_git_object: 4e040585285985cebc7e26ac402b6df8f4c063bb + last_write_checksum: sha1:cb63fb3cfb4debfca7b207b49e592566619f84b1 + pristine_git_object: 5d871a0e0f15cc27afe3c861f387609aa9a8a17f 
src/mistralai/client/models/functionname.py: id: 000acafdb0c0 - last_write_checksum: sha1:03d7b26a37311602ae52a3f6467fe2c306c468c1 - pristine_git_object: 2a05c1de42a6ff5775af5509c106eaa7b391778e + last_write_checksum: sha1:4145b7b817b712b85dcbedb309416c7ba72d827e + pristine_git_object: 07d98a0e65ccbcba330fb39c7f23e26d3ffc833c src/mistralai/client/models/functionresultentry.py: id: 213df39bd5e6 - last_write_checksum: sha1:7e6d951cfd333f9677f4c651054f32658794cc48 - pristine_git_object: a843bf9bdd82b5cf3907e2172ed793a391c5cba2 + last_write_checksum: sha1:04a8fd7396777c412fa9c73c0bef148b2ab53cb2 + pristine_git_object: ca73cbb7481fe0e97b354e9abe5ef6034f10bd98 src/mistralai/client/models/functiontool.py: id: 2e9ef5800117 - last_write_checksum: sha1:af5e38a4498149f46abd63eda97f9ccfb66a1fa3 - pristine_git_object: 74b50d1bcd2bc0af658bf5293c8cc7f328644fa1 + last_write_checksum: sha1:5c4ea61a1bccd87e1aae06bfa728c29a4ec60c54 + pristine_git_object: 13b0449687f64848cb2f2fdf792f148f9e3cfed9 + src/mistralai/client/models/getagentop.py: + id: 5a28bb1e727e + last_write_checksum: sha1:50a681253a1075f1268a269cd67154efa35dff6a + pristine_git_object: 55d8fe6860fa4c868c4d6d5d5d2ce4571e9071b4 + src/mistralai/client/models/getagentversionop.py: + id: a0db5a6aab1f + last_write_checksum: sha1:d1dfc0927abcae22460838902d1f5ddc2a224856 + pristine_git_object: 77b8a2662939e03b261f713aa7d9676746a4df1e + src/mistralai/client/models/getbatchjobop.py: + id: 443103fe3b88 + last_write_checksum: sha1:3a7f9656f3d169c60f0d3f16b00c4136d193468e + pristine_git_object: 792c3e2121902734094a7224c8605109fc697f44 + src/mistralai/client/models/getconversationhistoryop.py: + id: c863a4cbeb34 + last_write_checksum: sha1:4e04b4550c7b48635eca1943bcfee64027f0e7ca + pristine_git_object: c1fbf3de4ee966fffa2400a9c109d952b26543da + src/mistralai/client/models/getconversationmessagesop.py: + id: bb8a90ba7c22 + last_write_checksum: sha1:1b7aad5c74338aeecb11de44d8378aaa75498e37 + pristine_git_object: 6666198edce05a99c55f1c35f26f6d3b548c9b0d + src/mistralai/client/models/getconversationop.py: + id: 1a622b8337ac + last_write_checksum: sha1:4665e81fae4f12fabc09629f32d28c1c2de2bcf2 + pristine_git_object: d204d1755b4dc23ba8397ad24fec30bd064eacce + src/mistralai/client/models/getdocumentextractedtextsignedurlop.py: + id: 69099395d631 + last_write_checksum: sha1:f6d5e8499a314e903301e419fb206c33644363ff + pristine_git_object: 9a71181d3abd625643e741c562fe73f25bf12932 + src/mistralai/client/models/getdocumentop.py: + id: de89ff93d373 + last_write_checksum: sha1:4d1f358dfe3b44ccd2a88aea6730fbaf4b5f1d93 + pristine_git_object: d7b07db791a3adb3992475f0cf49c3fe01007ad9 + src/mistralai/client/models/getdocumentsignedurlop.py: + id: b8d95511c6d1 + last_write_checksum: sha1:255a0b505d558db3149652822718c7bcecc706e8 + pristine_git_object: e5d56c54c1ffc3529a8d1cf013bcb3327392b269 + src/mistralai/client/models/getdocumentstatusop.py: + id: f1f40b8f003f + last_write_checksum: sha1:c442daff8adb3db0ac58b03e54b7c05c82b202a9 + pristine_git_object: 4206f593ca58650f9df17b377b67c374a1b0d883 + src/mistralai/client/models/getdocumenttextcontentop.py: + id: ba23717093ef + last_write_checksum: sha1:33f047af38e4be2b71f4d90a36614ea7ab096a28 + pristine_git_object: 8a7b4aae025bbcb5ade5d4d36f2bb5e34cbb315e + src/mistralai/client/models/getfilesignedurlop.py: + id: 1aa50b81c8cf + last_write_checksum: sha1:a8fb95f119d173dd1d7afed02597a297dbbc7a89 + pristine_git_object: 06ed79eea058d4ebffc5d0b87ae2d06a32f4755a + src/mistralai/client/models/getfinetuningjobop.py: + id: afe997f96d69 + 
last_write_checksum: sha1:25db6d0d336a78189b603bbce16b0e0de84a33f1 + pristine_git_object: 1fb732f48a1a4c2993185a6a272879a83c80dc06 + src/mistralai/client/models/getlibraryop.py: + id: c84a92e23a90 + last_write_checksum: sha1:d51c0cf40a6ed398b0cb7078fe897d047b55e251 + pristine_git_object: bc0b4a238b146c6e5853e0b9d3031a876f30bc17 src/mistralai/client/models/githubrepositoryin.py: id: eef26fbd2876 - last_write_checksum: sha1:7736d0a475b47049c35aec59254c5d47b3ae609b - pristine_git_object: e56fef9ba187792238991cc9373a7d2ccf0b8c0d + last_write_checksum: sha1:cc98805951c3f80d9b8f0ba4037cf451551b0742 + pristine_git_object: e55389c380416f69ed7dc085cbbaaba056c4d1ba src/mistralai/client/models/githubrepositoryout.py: id: d2434a167623 - last_write_checksum: sha1:5d9625805bf6eb3c061ebdd73433ca2001e26cb1 - pristine_git_object: e3aa9ebc52e8613b15e3ff92a03593e2169dc935 + last_write_checksum: sha1:76d98ac7613e626599cb4c7a0b0366e9b20815ff + pristine_git_object: 514df01c217b40d8c050839ac40b938c68ef1bf6 src/mistralai/client/models/httpvalidationerror.py: id: 4099f568a6f8 - last_write_checksum: sha1:81432fd45c6faac14a6b48c6d7c85bbc908b175c - pristine_git_object: 34d9b54307db818e51118bc448032e0476688a35 + last_write_checksum: sha1:be2db0d4ec07da0ddb37878761545c3dde8fb8ec + pristine_git_object: e7f0a35bf208c32086c7b448273d1133d0f1027b src/mistralai/client/models/imagegenerationtool.py: id: e1532275faa0 - last_write_checksum: sha1:e5d4c986062850ce3ba4f66a8347848332192c21 - pristine_git_object: e09dba81314da940b2be64164e9b02d51e72f7b4 + last_write_checksum: sha1:85122792c3ba324086096345119fedf326f55c86 + pristine_git_object: 680c6ce2d08277e65e23ea3060e83c1fa4accb78 src/mistralai/client/models/imageurl.py: id: e4bbf5881fbf - last_write_checksum: sha1:d300e69742936f6e6583f580091827ada7da6c20 - pristine_git_object: 6e61d1ae2ec745774345c36e605748cf7733687b + last_write_checksum: sha1:9af5cff0b3a2c1c63e2bd1f998dcfeab273fd206 + pristine_git_object: 4ff13b1ccbc157f21013aacd7a062e89a26dcbf9 src/mistralai/client/models/imageurlchunk.py: id: 746fde62f637 - last_write_checksum: sha1:f6c19195337e3715fac3dc874abfc2333d661c8e - pristine_git_object: f967a3c8ced6d5fb4b274454100134e41c5b7a5c + last_write_checksum: sha1:57e48972720a3e317291250d6d94c44d295b69f5 + pristine_git_object: 993185cce833c59ad341b977cf9263654951fa03 src/mistralai/client/models/inputentries.py: id: 44727997dacb - last_write_checksum: sha1:afc03830974af11516c0b997f1cd181218ee4fb0 - pristine_git_object: 8ae29837a6c090fbe1998562684d3a372a9bdc31 + last_write_checksum: sha1:44ef8e75dd43b82276a0f06ef5c6be9eed46b379 + pristine_git_object: dc9892956f0e2583c51bf49ef89adbd22b8646d5 src/mistralai/client/models/inputs.py: id: 84a8007518c7 - last_write_checksum: sha1:62cf4c19b48f68f57f30223d48d06e33d08ae096 - pristine_git_object: fb0674760c1191f04e07f066e84ae9684a1431e3 + last_write_checksum: sha1:871491fa3b24315bc1bddf371334381f75ab035d + pristine_git_object: cfcdeb3d5895ccb34512c2a0a2e799e763e09c09 src/mistralai/client/models/instructrequest.py: id: 6d3ad9f896c7 - last_write_checksum: sha1:5fabc65cccf9f17ffbd20cd176341b4d78b62a5c - pristine_git_object: 1b2f269359700582687fdf4492ea3cef64da48bb + last_write_checksum: sha1:b56a77442b50b50151adedaa5ec356dc96c56428 + pristine_git_object: e5f9cccf174d8e73c42e8ee4aa294b43e1ad6cf5 src/mistralai/client/models/jobin.py: id: f4d176123ccc - last_write_checksum: sha1:ae6b1d9bc202db7a49d29f85b75bffea605126c5 - pristine_git_object: dc7684fcbecd558fc6e3e3f17c4000ec217285c1 + last_write_checksum: 
sha1:478a9beaf1c5ada536f5c333a47aa2ac0900bd16 + pristine_git_object: b3cb8998b5b0ce00414e40643eb3e259b2c0aabf src/mistralai/client/models/jobmetadataout.py: id: 805f41e3292a - last_write_checksum: sha1:5f84c58dab92d76de8d74f2e02cdf7b2b4c9cc12 - pristine_git_object: f91e30c09232b5227972b3b02ba5efbde22ac387 - src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py: - id: b56cb6c17c95 - last_write_checksum: sha1:e5e2c422bb211bb4af3e8c1a4b48e491d0fdf5a4 - pristine_git_object: 21a04f7313b3594a204395ca080b76e2a4958c63 - src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py: - id: 36b5a6b3ceee - last_write_checksum: sha1:5ada7f2b7a666f985c856a6d9cab1969928c9488 - pristine_git_object: 32e34281cd188f4d6d23d100fe0d45002030c56b - src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py: - id: d8f0af99c94d - last_write_checksum: sha1:3026ea0231866e792dd3cf83eb2b2bac93eda61b - pristine_git_object: 3557e773860e94d85f7a528d000f03adfcc60c2f - src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: - id: 34f89d2af0ec - last_write_checksum: sha1:2a7225666b02d42be0d3455a249a962948feadf9 - pristine_git_object: 4536b738442ec9710ddf67f2faf7d30b094d8cd5 - src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: - id: d175c6e32ecb - last_write_checksum: sha1:c61f02640ec384778e6f6b1f08dcb31dc5c1fb82 - pristine_git_object: b36d3c3ef5abb30abc886876bb66384ea41bab9e - src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: - id: 81651291187a - last_write_checksum: sha1:80bc2d32588a115c4ac5571a3c1ffc8a24ab9d45 - pristine_git_object: ece0d15a0654ec759904276ad5d95c5619ff016f - src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: - id: d910fd8fe2d6 - last_write_checksum: sha1:4f57772cda3075251f36c52a264ebce1328cb486 - pristine_git_object: aa5a26098e084885e8c2f63944e7549969899d3c - src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: - id: cf43028824bf - last_write_checksum: sha1:e7bb3855dabfcaf7b92e6917e6da39246fc01282 - pristine_git_object: 7e399b31354e4f09c43efbe9ffe3d938f6af0d8c - src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: - id: e7ff4a4a4edb - last_write_checksum: sha1:21d90c0a3fa151bd855d63ed241f518812f26f82 - pristine_git_object: ed5938b039be719169e62e033b7735bde7e72503 - src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: - id: 7cc1c80335a9 - last_write_checksum: sha1:4270cb52e5aef807ec2d8a9ab1ca1065b0cf8a10 - pristine_git_object: e1be0ac00af889a38647b5f7e4f9d26ed09ee7c4 - src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: - id: 6d9dc624aafd - last_write_checksum: sha1:ad615dd8d493fec4f818f19e5745ff52575181aa - pristine_git_object: a2b70b37e349c7f5fc6c687fbad015eb218de952 + last_write_checksum: sha1:1333181d5a3dff43076095f61e1d57f37085abbe + pristine_git_object: 1d386539d8c638d96b8f468cfca3241dfc07a9f3 src/mistralai/client/models/jobsout.py: id: 22e91e9631a9 - last_write_checksum: sha1:1bb48570e040fa9ad4408b41fef8ce4ec0bf52be - pristine_git_object: 9087704f0660e39f662efbd36f39713202598c43 + last_write_checksum: sha1:e9434f43df7df8e991eb0387eabcf308cae3cb65 + pristine_git_object: a4127a5d835c0f0ead04980f05cb293e18970905 src/mistralai/client/models/jsonschema.py: id: e1fc1d8a434a - last_write_checksum: sha1:6289875b78fab12efa9e3a4aa4bebdb08a95d332 - pristine_git_object: db2fa55ba9001bd3715451c15e9661a87ff7501a + 
last_write_checksum: sha1:6711508e9c1bd20fc8b1bfdbd1181ca29144ef0d + pristine_git_object: 948c94ed8fe8102a9cdced68fde6be03489f5778 src/mistralai/client/models/legacyjobmetadataout.py: id: 4f44aa38c864 - last_write_checksum: sha1:b6aba9032bb250c5a23f2ff2a8521b7bddcd1a06 - pristine_git_object: 155ecea78cb94fc1a3ffaccc4af104a8a81c5d44 - src/mistralai/client/models/libraries_delete_v1op.py: - id: b2e8bbd19baa - last_write_checksum: sha1:566db1febc40c73476af31a27201a208b64bc32a - pristine_git_object: fa447de067518abb355b958954ff9a3ee9b2cf6d - src/mistralai/client/models/libraries_documents_delete_v1op.py: - id: 81eb34382a3d - last_write_checksum: sha1:c7bd801e5f75d1716101721cd3e711be978cb7c5 - pristine_git_object: bc5ec6e5443b32d47e570c4f43c43827928a3e39 - src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py: - id: a7417ebd6040 - last_write_checksum: sha1:a298e22d9a68de87288419717b03273c1a26de6e - pristine_git_object: 24ed897d305cfccdc2b9717e214da901479cc70e - src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py: - id: d4b7b47913ba - last_write_checksum: sha1:0855bb39a09514fb5709bd3674eb5fcc618299f1 - pristine_git_object: 350c8e73992583b7890889c5ff252096a8df7fbd - src/mistralai/client/models/libraries_documents_get_status_v1op.py: - id: f314f73e909c - last_write_checksum: sha1:ca4679fbdc833b42e35b4c015ddf8434321d86eb - pristine_git_object: 92b077d3b5850985cac73ee880de7eab31a5b8fd - src/mistralai/client/models/libraries_documents_get_text_content_v1op.py: - id: 1ca4e0c41321 - last_write_checksum: sha1:8dbd91ab145d4c01e91502c9349477e1f98551d7 - pristine_git_object: 68f9725a1a390028e3118611bb0df1b4ab103943 - src/mistralai/client/models/libraries_documents_get_v1op.py: - id: 26ff35f0c69d - last_write_checksum: sha1:208b7ca22416295d27f51513e3fe58947e1549c7 - pristine_git_object: a67e687eaffebbee81654bbbb78ad00bcc28999c - src/mistralai/client/models/libraries_documents_list_v1op.py: - id: 756f26de3cbe - last_write_checksum: sha1:a742a58c137ecf1cfd7446d5f2f60211ff087751 - pristine_git_object: 5dec33858719e713c0fa07538aa0dfcab8d69dad - src/mistralai/client/models/libraries_documents_reprocess_v1op.py: - id: dbbeb02fc336 - last_write_checksum: sha1:516691f61c18e18b96738360a85acd34ba415ca0 - pristine_git_object: 8aee75522f7677e9f6fc49e2f8c5a75124db3dc7 - src/mistralai/client/models/libraries_documents_update_v1op.py: - id: 734ba6c19f5f - last_write_checksum: sha1:929f437a1c366b6cbecfc86b43436767712327f8 - pristine_git_object: f677b4ddc96b51ecd777240844800b2634ca4358 - src/mistralai/client/models/libraries_documents_upload_v1op.py: - id: "744466971862" - last_write_checksum: sha1:d6b085e01eac97f404a01e137413e159390c1382 - pristine_git_object: e2d59d9f1556ca77c0666b2bba3213ef5386f82a - src/mistralai/client/models/libraries_get_v1op.py: - id: d493f39e7ebb - last_write_checksum: sha1:d61166f6c399516d905c7376fabe56c102265747 - pristine_git_object: 83ae377d245e5c93a4a9118dd049a9096e9f3074 - src/mistralai/client/models/libraries_share_create_v1op.py: - id: feaacfd46dd3 - last_write_checksum: sha1:66ddb6685924e1702cfc40dbcb9a0d2e525cb57d - pristine_git_object: d0313bd01acd6e5403402d0d80a604a6c2812e19 - src/mistralai/client/models/libraries_share_delete_v1op.py: - id: 7f3a679ca384 - last_write_checksum: sha1:3ac568a5e09a6c74bc6779cd9c0bc3df36b24785 - pristine_git_object: 620527d50c15f5b14307e7735b429fe194469ed5 - src/mistralai/client/models/libraries_share_list_v1op.py: - id: 8f0af379bf1c - last_write_checksum: sha1:3d764be7232233229dc79079101270ace179e65f - 
pristine_git_object: fd5d9d33ce4b757b369d191621a727f71b5d2e35 - src/mistralai/client/models/libraries_update_v1op.py: - id: 92c8d4132252 - last_write_checksum: sha1:482c5b78278a6e729ed980191c6c1b94dbd890e6 - pristine_git_object: c434ab7a8be94042e6add582520dba11dc9d8d01 + last_write_checksum: sha1:e93d512c8cb6e0812248a195ff869428209cd71f + pristine_git_object: 4453c15798f4fd4db2de64e0beaf7ad557d82fa1 src/mistralai/client/models/libraryin.py: id: 6147d5df71d9 - last_write_checksum: sha1:5b7fe7a4bde80032bd36fad27f5854ad4bb1832f - pristine_git_object: a7b36158a165ab5586cba26cc1f96ab6fe938501 + last_write_checksum: sha1:34c5c9582a488fe87da084e74316e0fd76aa28d1 + pristine_git_object: 1a71d410d997a6d3f197947f821117e0605517af src/mistralai/client/models/libraryinupdate.py: id: 300a6bb02e6e - last_write_checksum: sha1:95060dfcdafbfe2deb96f450b128cd5d6f4e0e5a - pristine_git_object: f0241ba17f95b2c30a102bf1d09ac094c6e757e5 + last_write_checksum: sha1:c9b1a0a00d31fa839df12353f1a3ee9d0b3ffb60 + pristine_git_object: 328b2de3cd4e304fd462882eca7226e460b7c4a7 src/mistralai/client/models/libraryout.py: id: 4e608c7aafc4 - last_write_checksum: sha1:4089ffe9adc8e561b9ec093330c276de653bff7f - pristine_git_object: d1953f16490d40876d05cdd615a3ae8cbcbfd9f6 + last_write_checksum: sha1:9841adb596398554dfcaeb35b7e5a0572c541cff + pristine_git_object: c7ab7b8d39b68b5998c4874f9942caa275cf65d9 + src/mistralai/client/models/listagentaliasesop.py: + id: ff038766a902 + last_write_checksum: sha1:eef4e471999d5df5195aea51cde027b55567aeef + pristine_git_object: 83c6d1769c10fe38402a36b6aff2a18da61f4504 + src/mistralai/client/models/listagentsop.py: + id: a573a873c404 + last_write_checksum: sha1:db3c9e6ddc146138ed971f9970d9a164c0f97456 + pristine_git_object: 863fc13af1429bd1a6c02a9a20d2b6cb0cad7b34 + src/mistralai/client/models/listagentversionsop.py: + id: ccc5fb48e78f + last_write_checksum: sha1:0f2306bcceba2a2d7bfeb0be33126514d9287f17 + pristine_git_object: 613d3d8516690e6cba15922dfe69bdf62c039b01 + src/mistralai/client/models/listbatchjobsop.py: + id: f49af453f5e6 + last_write_checksum: sha1:e48b0e7371ee8f637e4fd6bed140cdbb1d405a7d + pristine_git_object: 5322df816e391a5569afcfd14edaeb128467a176 + src/mistralai/client/models/listconversationsop.py: + id: d6007f6c1643 + last_write_checksum: sha1:ece12b550abe6e17eb79f7a05593a93ea055f3f6 + pristine_git_object: 1c9a347c0ad4801c3a1b941e6328061d23d7dcd5 src/mistralai/client/models/listdocumentout.py: id: b2c96075ce00 - last_write_checksum: sha1:13c5461b89970ae00cdce8b80045ed586fd113b7 - pristine_git_object: 24969a0f6dc3d2e0badd650a2694d1ffa0062988 + last_write_checksum: sha1:fc3eca772d1e32938ea1bd2f3e98cdea5f1003f3 + pristine_git_object: a636b3deff66fe4277a63c04fc7dd6c5e74e58e7 + src/mistralai/client/models/listdocumentsop.py: + id: 3e42bdc15383 + last_write_checksum: sha1:d9beade6d8bb8050a67e32c2a73926b140015e68 + pristine_git_object: 0f7c4584d793c7e692a4bbc6678e18549b0e0364 + src/mistralai/client/models/listfilesop.py: + id: e5bd46ac0145 + last_write_checksum: sha1:3e0bc8a7318ffd1c3fe15f335ea2bc1e18c714a1 + pristine_git_object: a9af5c70c98adce56653ff01772fe5900530a36e src/mistralai/client/models/listfilesout.py: id: ae5fa21b141c - last_write_checksum: sha1:2ef7f78253cde73c3baae6aebeda6568bcb96c0d - pristine_git_object: 1db17c406778ac201dfcc1fd348a3e1176f05977 + last_write_checksum: sha1:4bc8ef424beb41c75d9c6fa4e101d330a951a99f + pristine_git_object: 460822f71fe8b0fc6292b804dc2a9de29bff4ef5 + src/mistralai/client/models/listfinetuningjobsop.py: + id: b77fe203b929 + 
last_write_checksum: sha1:af98423b166930cd18a1d377ea688540f3364166 + pristine_git_object: 8712c3fa6ac24094532fdfc047561997ea34552f + src/mistralai/client/models/listlibraryaccessesop.py: + id: 581b332626b7 + last_write_checksum: sha1:0a6bd277a706d807d87d3f2a4f870cc6ba917928 + pristine_git_object: 2206310f301f6ea40f14a495f5f6c6b4e76dbbf7 src/mistralai/client/models/listlibraryout.py: id: cb78c529e763 - last_write_checksum: sha1:044d3d17138c3af1feba6b980f92f8db7bd64578 - pristine_git_object: 24aaa1a9874d0e2054f6a49efe0f70101cec2fb2 + last_write_checksum: sha1:3cd81fd6f6d2421c6b6d06077f0bf1d5b3c96cad + pristine_git_object: 39fa459f7cc7be17c751025287d7827c9d141aac src/mistralai/client/models/listsharingout.py: id: ee708a7ccdad - last_write_checksum: sha1:0644f080e93a533f40579b8c59e5039dea4ee02d - pristine_git_object: f139813f54e97810502d658ad924911de646ab09 + last_write_checksum: sha1:18e6501b00a566121dfd6a1ce7b0e23fef297e45 + pristine_git_object: 443ad0d6a275c1c8bae4adda3e67621b068c0412 src/mistralai/client/models/messageentries.py: id: e13f9009902b - last_write_checksum: sha1:7c2503f4a1be5e5dde5640dd7413fed06aee09b4 - pristine_git_object: 9b1706dee5fbb5cae18674708a1571b187bf0576 + last_write_checksum: sha1:43aebdc9eaecc8341298dc6b281d0d57edf4e9e6 + pristine_git_object: a95098e01843fe3b4087319881967dc42c6e4fef src/mistralai/client/models/messageinputcontentchunks.py: id: 01025c12866a - last_write_checksum: sha1:740e82b72d5472f0cc967e745c07393e2df8ae38 - pristine_git_object: e90d8aa0317e553bfc0cceb4a356cf9994ecfb60 + last_write_checksum: sha1:9eab6d7734dcd4bf9da5222c1927f5f40ef45db0 + pristine_git_object: 63cf14e7fcbc7c3969220b4f07109473b246bf49 src/mistralai/client/models/messageinputentry.py: id: c0a4b5179095 - last_write_checksum: sha1:e9898424d5129750738adb6a049232162824282d - pristine_git_object: 12a31097a88e90645c67a30451a379427cd4fcd3 + last_write_checksum: sha1:b1b8f5b78eb5f57f5cfa7163ed49101736bcefaa + pristine_git_object: 15046d25130cda6571f07a456c2b5a67d2a3bcc0 src/mistralai/client/models/messageoutputcontentchunks.py: id: 2ed248515035 - last_write_checksum: sha1:f239151ae206f6e82ee3096d357ff33cf9a08138 - pristine_git_object: 136a7608e7e2a612d48271a7c257e2bb383584f3 + last_write_checksum: sha1:df4ef4d17ce48df271ff2b8cab297ae305aa08ec + pristine_git_object: def7a4d27cd3d1479864a1d6af19e89bd57bff70 src/mistralai/client/models/messageoutputentry.py: id: a07577d2268d - last_write_checksum: sha1:d0ca07d6bf6445a16761889bf04a5851abe21ea3 - pristine_git_object: d52e4e3e722ef221f565a0bd40f505385974a0e1 + last_write_checksum: sha1:0633b8c619883bedb1a6ad732c5487c7e7f817f9 + pristine_git_object: 8752fc36bfec39e0ab79d4593ae0cb43ea00641c src/mistralai/client/models/messageoutputevent.py: id: a2bbf63615c6 - last_write_checksum: sha1:fb98c35064fd9c65fa8c8c0cbc59293067ac793f - pristine_git_object: 3db7f5a0908a72f75f6f7303af4ad426a4909d84 + last_write_checksum: sha1:bbdb2c840a7a196edcb6ac6170e8273cc47a495e + pristine_git_object: 39c1013939ea238cb1c7ccbc05480a6840400061 src/mistralai/client/models/metricout.py: id: 92d33621dda7 - last_write_checksum: sha1:056f6e7e76182df649804034d722c5ad2e43294f - pristine_git_object: f8027a69235861ae8f04ccc185d61fa13cc8cc14 + last_write_checksum: sha1:6198ba9e2cd66fcf7f9fcc1cf89481edd432cf11 + pristine_git_object: 5705c71283ce7d4a01d60752657f39279c0f1f85 src/mistralai/client/models/mistralerror.py: id: 68ffd8394c2e - last_write_checksum: sha1:07fce1e971a25d95ffa8c8f3624d62cdf96e353a - pristine_git_object: 28cfd22dc3d567aa4ae55cc19ad89341fa9c96a1 + 
last_write_checksum: sha1:8b867eca5ca81aa6364f13c9d7e42f9b0d855724 + pristine_git_object: 862a6be8294db5b30bb06cb7b85d60c52ed8e8c9 src/mistralai/client/models/mistralpromptmode.py: id: 95abc4ec799a - last_write_checksum: sha1:ed0b87853d373d830b6572cbdf99d64f167b1d48 - pristine_git_object: 7008fc055bd1031096b7a486a17bf9a5b7841a4c + last_write_checksum: sha1:a1417b987bb34daeb73ca4e015c085814e6c8ad2 + pristine_git_object: 9b91323e7545d636308064085ca16fc554eac904 src/mistralai/client/models/modelcapabilities.py: id: 64d8a422ea29 - last_write_checksum: sha1:3857f4b989eeed681dffe387d48d66f880537db6 - pristine_git_object: a6db80e73189addcb1e1951a093297e0523f5fa4 + last_write_checksum: sha1:5bc65733cf1c2f4ee8e1b422636fda754bdf8afe + pristine_git_object: c329efbcd9be212c7428c09f28f897834c9239d3 src/mistralai/client/models/modelconversation.py: id: fea0a651f888 - last_write_checksum: sha1:beade63589bde3cae79f471a71e3d04d3f132f97 - pristine_git_object: 574f053d4186288980754ead28bb6ce19b414064 + last_write_checksum: sha1:6186e845be2717da6116e20072835c050d3fdaa5 + pristine_git_object: c0bacb7fd9cd052ecb31a72c6bf593504034e069 src/mistralai/client/models/modellist.py: id: 00693c7eec60 - last_write_checksum: sha1:d6ff956092c0c930a6db02cbe017bc473403639c - pristine_git_object: 6a5209fa6dac59539be338e9ac6ffbefd18057ee + last_write_checksum: sha1:89695c6a680da571c7a77c4544607bd83b3a93d5 + pristine_git_object: c122122c38a3331337cc702340cf1d3e0c9ef99d src/mistralai/client/models/moderationobject.py: id: 132faad0549a - last_write_checksum: sha1:d108ea519d2f491ddbc2e99ab5b8cc02e6987cf8 - pristine_git_object: a6b44b96f00f47c168cd1b2339b7aa44e6ca139e + last_write_checksum: sha1:742d942d72b615432c066827b822290cf4d51d40 + pristine_git_object: 9aa4eb15d837ab2af97faa131a362d50a3a85482 src/mistralai/client/models/moderationresponse.py: id: 06bab279cb31 - last_write_checksum: sha1:d31313c2164ecbc5a5714435a52b6f0dda87b8fe - pristine_git_object: 288c8d82d87a9944ae6d7a417bb92e558c6dcc0f + last_write_checksum: sha1:b9158e575276c1e0a510c129347b9a98c5a70567 + pristine_git_object: a8a8ec3d8d8a58deb3c1f8358c6dce5a9734f89c src/mistralai/client/models/no_response_error.py: id: 2849e0a482e2 - last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f - pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + last_write_checksum: sha1:35b1651843a697024146d6377838b5b99c5c66d3 + pristine_git_object: 7705f1945567498ce606364490685a91b13cd8dd src/mistralai/client/models/ocrimageobject.py: id: 685faeb41a80 - last_write_checksum: sha1:93f3d24c4b7513fffef60d5590f3e5a4a0b6e1e4 - pristine_git_object: e97fa8df46c6e39775b3c938c7e1862a507090d2 + last_write_checksum: sha1:663f11a19e067d424263eee40d8127cdc56fb72e + pristine_git_object: e95b67e17e51653bf194ad1cff3a926f34cf97c2 src/mistralai/client/models/ocrpagedimensions.py: id: 02f763afbc9f - last_write_checksum: sha1:28e91a96916711bce831e7fa33a69f0e10298eed - pristine_git_object: f4fc11e0952f59b70c49e00d9f1890d9dd93a0df + last_write_checksum: sha1:f572ed8992ba1ba4d53b705c4e8c94c85ae1290e + pristine_git_object: 847205c6c74a621dd2ee6d9eb18d1acba8395c50 src/mistralai/client/models/ocrpageobject.py: id: 07a099f89487 - last_write_checksum: sha1:367035d07f306aa5ce73fc77635d061a75612a68 - pristine_git_object: f8b43601e7a3dd4fae554c763d3ed1ee6f2927a3 + last_write_checksum: sha1:10cbd1dddcb1f1f5d530048130908ad0ce715928 + pristine_git_object: 4f4ccf43011fa2563f79bb70ae2a813b84f04074 src/mistralai/client/models/ocrrequest.py: id: 36f204c64074 - last_write_checksum: 
sha1:d4b7a8bf70efe5828d04d773f4b82284a18656f1 - pristine_git_object: 03a6028c5cc298b3ed66ae5f31c310d573a954e5 + last_write_checksum: sha1:8e669292b846a5af4e3cee0b632524696e3067bc + pristine_git_object: 18b899dd5ecc162dc8e92622f56bed503fff80f7 src/mistralai/client/models/ocrresponse.py: id: 2fdfc881ca56 - last_write_checksum: sha1:fb848d5f5c1456028a1e04b9e4f5be3234fa073f - pristine_git_object: 2813a1ca4c94d690f248a318a9e35d655d80600c + last_write_checksum: sha1:4a28dbfcc076c149e4f08a830d4d7f770836eb15 + pristine_git_object: 0a36e97500b4f62adac2526d7dd7cb85c9bdb8b8 src/mistralai/client/models/ocrtableobject.py: id: d74dd0d2ddac - last_write_checksum: sha1:d562f3207193c7d5ef5d7b6175eba8006b6c3a73 - pristine_git_object: 0c9091de8975d8bd8e673aadbb69a619b96d77e8 + last_write_checksum: sha1:3116548673509f4e9f6a50d39f58ce3374823cc4 + pristine_git_object: e32ad894cd97546e635d12595051da103cde9fd8 src/mistralai/client/models/ocrusageinfo.py: id: 272b7e1785d5 - last_write_checksum: sha1:b466bdd22ad5fa5f08c8aa51e3a6ff5e2fcbf749 - pristine_git_object: 62f07fd4fafa4c16a8cf80a9f52754904943272a + last_write_checksum: sha1:b8fb06d0dad22f958ac756e65d70f5ba410ad47a + pristine_git_object: a421d850450bb3f0b62853c931cd457434d2f174 src/mistralai/client/models/outputcontentchunks.py: id: 9ad9741f4975 - last_write_checksum: sha1:47d74d212ebcb68ff75a547b3221f2aee3e07bfe - pristine_git_object: ad0c087e0dcbe302dd9c73d1ea03e109e9a66ef7 + last_write_checksum: sha1:afb76f3af2952c2afab5397e348ddfd6dbb56c4f + pristine_git_object: 1a115fe8b4874a6bd86719d91332cd3db6d95b46 src/mistralai/client/models/paginationinfo.py: id: 48851e82d67e - last_write_checksum: sha1:b17cc84c592706882d5819b1a706c9a206de9198 - pristine_git_object: 0252f4482f50b34a35f52911b4b57b6899751b42 + last_write_checksum: sha1:166961e2c0f573ba0677ee803820bb944a8a5efb + pristine_git_object: 2b9dab6258249f7be87e1d4a73a2502e21fe1f0d src/mistralai/client/models/prediction.py: id: 1cc842a069a5 - last_write_checksum: sha1:d9bd04d22d58e7e1be0195aaed218a4f407db9c0 - pristine_git_object: f2c5d9c60c50c6e397d7df9ce71ccff957b0e058 + last_write_checksum: sha1:ca391fc2f9faf1657392ceda952c2ee422121952 + pristine_git_object: 52f4adf1eb46d7d5679f9705871cd73e08ae8830 src/mistralai/client/models/processingstatusout.py: id: 3df842c4140f - last_write_checksum: sha1:83fbbccf635fabf60452dfa8dcac696033c3d436 - pristine_git_object: 031f386fb4381b8e2ead1bd22f7f53e59e37f6bb + last_write_checksum: sha1:007a476e4101cac4d2a9eef94d289f0f486d763a + pristine_git_object: 3acadcc9792c286cd31031a80e108b74bc2c0c4e src/mistralai/client/models/realtimetranscriptionerror.py: id: 8c2267378f48 - last_write_checksum: sha1:671be287639964cc6ac7efbed41998f225845e2e - pristine_git_object: e6a889de576f9e36db551a44d4ed3cf0c032e599 + last_write_checksum: sha1:b9642dd42c4092bdebe0a4f8d35c68152f259c05 + pristine_git_object: f8f2d3da9598ce0cd90d148ba1a9be0c5d6237cc src/mistralai/client/models/realtimetranscriptionerrordetail.py: id: 5bd25cdf9c7a - last_write_checksum: sha1:471824f03586b63688de43608d6c756b8a156e11 - pristine_git_object: 27bb8d872792723b06238b3f0eebed815948fd63 + last_write_checksum: sha1:a226b10718b1fe4a661311cbd98ea3b1d1ac4163 + pristine_git_object: cec1f6eabd44ceab4e58694a0862c9c90ea2f264 src/mistralai/client/models/realtimetranscriptionsession.py: id: 02517fa5411a - last_write_checksum: sha1:a6db31662165d3df47a5da11efd1923121d1593e - pristine_git_object: 3a3306513c111125c71871024caa650176360c1b + last_write_checksum: sha1:0073b248604f667e89e34cf01184a788ca84d63f + pristine_git_object: 
d20d0d8c94aeec425a2c1dfb93b72ac6878cb8af src/mistralai/client/models/realtimetranscriptionsessioncreated.py: id: 4e3731f63a3c - last_write_checksum: sha1:5d2e0541b58a3c647ded25d6a0cf8590f64cf0db - pristine_git_object: cc6d5028f221e1794c723dedac5c73564ddb61f7 + last_write_checksum: sha1:d3fb5c5dc417a0ebb12a30770324674e055526ae + pristine_git_object: c4fa5774502699529e27870436ca65b9f88ccfe1 src/mistralai/client/models/realtimetranscriptionsessionupdated.py: id: 686dc4f2450f - last_write_checksum: sha1:2311bf0107f0f957c48ee1841cc95369269a6105 - pristine_git_object: 3da23595291cd49e42d30646288f4f39da6f8c00 + last_write_checksum: sha1:7e4de1020672efc3503cda5b916b41056bf1d22b + pristine_git_object: a61fb05e8e5ba3ffa20bbb98bf61c17045c1f75c src/mistralai/client/models/referencechunk.py: id: 921acd3a224a - last_write_checksum: sha1:abfc5818dbe9e40be5d71436f2ffd1a9b53bd4ab - pristine_git_object: 4c703b8165329a55343c20b5080670168327afc4 + last_write_checksum: sha1:0dcff62499afdb1db0fd4f46614f8680f94837f4 + pristine_git_object: 7634d8ae07c96a99e634dcf888077f1d8cc4dc67 + src/mistralai/client/models/reprocessdocumentop.py: + id: b2913a7aa5c9 + last_write_checksum: sha1:07174ee58ec12909f08a08a9a6d7427ee9b2d5d0 + pristine_git_object: 48a4b72bf285e2f2e4b2d0c352ebc463518ed712 src/mistralai/client/models/requestsource.py: id: 3f2774d9e609 - last_write_checksum: sha1:31aae791bf737ad123fe189227d113838204ed42 - pristine_git_object: 7b0a35c44050b6fca868479e261805a77f33e230 + last_write_checksum: sha1:1ce68530a46793968f1122d29df722f0a5c9d267 + pristine_git_object: fc4433cb4e657b06aa6a4c078094c2df342810e2 src/mistralai/client/models/responsedoneevent.py: id: cf8a686bf82c - last_write_checksum: sha1:25972ca80ff7fd7a0d6dfe98718be52580dacc61 - pristine_git_object: 5405625692cb22c60a7b5f5a6f1b58cee5676576 + last_write_checksum: sha1:376c2a65f1fcdfe20d7cf0bd6aa6d8870a4f32c1 + pristine_git_object: ed331ff12c8728290b8ad17e52d9384265233665 src/mistralai/client/models/responseerrorevent.py: id: b286d74e8724 - last_write_checksum: sha1:a4767e8820ae840559fc55c8fcd346dea41a386e - pristine_git_object: c9ef95a04c91c32e7a7973309e2174b7e776f099 + last_write_checksum: sha1:ecff834ec62bf46d2aa5d9753f3898ed86caad45 + pristine_git_object: 8f196a52b469458da5c9f072983870da8c4fc4ea src/mistralai/client/models/responseformat.py: id: 6ab8bc8d22c0 - last_write_checksum: sha1:ad0489488713a977dbf4eac739ce2734c8280350 - pristine_git_object: 5899b0175cefd4159eb680a3715a72fa78577ba4 + last_write_checksum: sha1:e0c29239b4cd698af50412a1cab85217ccbb1796 + pristine_git_object: 409b80d658e4c93f4ee25c218fe74d65fd84ad44 src/mistralai/client/models/responseformats.py: id: c4462a05fb08 - last_write_checksum: sha1:863c7ec4c567d8f0c4e6305b47896424726e71be - pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + last_write_checksum: sha1:3cb82d44a4f9df5e9a3f51867be6eab1d439d87a + pristine_git_object: 21345778ad2d41a3746292e67fec628f9ec2a84d src/mistralai/client/models/responsestartedevent.py: id: 24f54ee8b0f2 - last_write_checksum: sha1:1bd2a884b9f66eb811fc83d8c3644913dfa80ab1 - pristine_git_object: dc6a10f91e2bb0d13a582ed03e7db2089b75bcf7 + last_write_checksum: sha1:8be1513409934d7ea1c524e468954f7eda0a8c62 + pristine_git_object: 256d2a6c864edf4f3ccd77b2db139c11fe4f6727 src/mistralai/client/models/responsevalidationerror.py: id: c244a88981e0 - last_write_checksum: sha1:2687c9ca7df0763384030719e5c1447d83f511b3 - pristine_git_object: bab5d0b70e0bb2ea567a16a1a7c5db839651836f - src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: - 
id: 6fefa90ca351 - last_write_checksum: sha1:f7308b269e12b2554a27de9d41312097d0d55d82 - pristine_git_object: 7fdcd37d5879aaca158f459df830a5a4dc55bfa0 + last_write_checksum: sha1:74a39321dee69f3487d9b9e01ffb2e40715176f4 + pristine_git_object: 1ed0d55266a106364fe58aa1e476fafbfbbbfdfe + src/mistralai/client/models/restartconversationop.py: + id: 2f6f3e4bbfd8 + last_write_checksum: sha1:9500d3ebea040ff4a203f3f025ff1bff8a397235 + pristine_git_object: b09eaed5bc8ecdbb7f1952c97b2e665462c70f9e + src/mistralai/client/models/restartconversationstreamop.py: + id: 16dc9ee5bf22 + last_write_checksum: sha1:b16f54529f4fd7d1422c82ff1a6dd5a9a82ba6bd + pristine_git_object: 3b2025f536d1c54ed58064b4be33aaafb9297ac4 + src/mistralai/client/models/retrievefileop.py: + id: ee73efdf9180 + last_write_checksum: sha1:330ec0a78a7ba623f21cd378b53250045bea984c + pristine_git_object: edd50e571cf56c6c22acc1777f6c9af38787f07d src/mistralai/client/models/retrievefileout.py: id: 8bb5859aa0d0 - last_write_checksum: sha1:9d182b5b20c8edef9b98a42036b13afd98031fd5 - pristine_git_object: ffd0617a1c6465a5f8080eb65e382e7a9169eef4 + last_write_checksum: sha1:1077bdb8fcc5ba22b2deb7f5c95fefe7b1fb82f5 + pristine_git_object: 2abf2161cd61d84f04836740a526c0e3711c3f6d + src/mistralai/client/models/retrievemodelop.py: + id: d883baa79c9e + last_write_checksum: sha1:525c7e9cf8594433cbb21374422067a75e6b53a9 + pristine_git_object: b4334e9a5541a14f7916244761213b883d507a41 src/mistralai/client/models/sampletype.py: id: a9309422fed7 - last_write_checksum: sha1:1eb21a68c138e9a0d39b4dd14bcffc9e3ff0784f - pristine_git_object: e0727b028c790a62da67784965f825436dead4f8 + last_write_checksum: sha1:86a61340a647696f6c35a82d945509b1c85aa6f7 + pristine_git_object: dfec7cce1e22ab607b6a9e947fa940284426086d src/mistralai/client/models/sdkerror.py: id: 12f991dad510 - last_write_checksum: sha1:9ee3f2dfd9977ce77957d60116db7d04740a4eed - pristine_git_object: ceb03c4868f9c9111007d6c16411f5da1954f211 + last_write_checksum: sha1:c2c344c8b7e23b0c93eeafedd25d28582467c3a7 + pristine_git_object: 101e1e6a67c3672e899b39dbfe10d45550a4449a src/mistralai/client/models/security.py: id: c2ca0e2a36b7 - last_write_checksum: sha1:415802794c6a3f22c58e863be0f633727f681600 - pristine_git_object: 1b67229bee0b64f3a9e8fc3600a7b0c9c13c0a2d + last_write_checksum: sha1:cec2a544790c2178f92742ac88e546efeacedb40 + pristine_git_object: 4fa8b4b2651f1d13811faf2da6e481243ea84e5a src/mistralai/client/models/shareenum.py: id: a0e2a7a16bf8 - last_write_checksum: sha1:0beaa4472ed607142b485c9e208441f9050746b9 - pristine_git_object: ca1b96245e81327aa830f07c0588dccdc1ee518e + last_write_checksum: sha1:15a84d57ceeb74cfb37275f714954e42d8e9b3ba + pristine_git_object: 08ffeb7e46fbbc28b7c93ef2aa4a49aff7c0d35e src/mistralai/client/models/sharingdelete.py: id: f5ecce372e06 - last_write_checksum: sha1:c943bfc24aa0f2035a1b5261d29efb5f3518a555 - pristine_git_object: d659342f1330d73354d557a45bc1a16015a38d8b + last_write_checksum: sha1:c5e4e6df47ef2d5715a99533a1efd936f0e7e16e + pristine_git_object: 202732cf785074446cd24360dd9c540768e4134f src/mistralai/client/models/sharingin.py: id: e953dda09c02 - last_write_checksum: sha1:996c17a8db2c61daed285ee5cafd44481fbd1483 - pristine_git_object: 630f4c70552167237735797f6b64d3f1df5ea214 + last_write_checksum: sha1:f60bd60d37f0accadf50ea111055fd99aa190a5f + pristine_git_object: 8cc3e8968d9d5460f040ebdb66d8f460e86d2c96 src/mistralai/client/models/sharingout.py: id: 0b8804effb5c - last_write_checksum: sha1:b3356792affd50e062bb1f1a84d835bbcfeb50ab - pristine_git_object: 
195701d111514fe9aebfedce05dbb4bafab67fed + last_write_checksum: sha1:362bda8a5bd70d12e2de33814d3bd36a61c6d7ae + pristine_git_object: 778071546c12c2636d2deec6042e6b686b6428c6 src/mistralai/client/models/source.py: id: fcee60a4ea0d - last_write_checksum: sha1:6f3ea355c62280e1fc6008da69ed0b987f53fd72 - pristine_git_object: 181b327ea73a9bcf9fb90f95633da71cee96e599 + last_write_checksum: sha1:4d4277d75f7ce001780a069898b38afa7c8addc0 + pristine_git_object: fcea403cdbad44299fb2178f07a63bb7e83dc033 src/mistralai/client/models/ssetypes.py: id: 1733e4765106 - last_write_checksum: sha1:8154966cda84ddd5225936ee47c87df1143ee1f1 - pristine_git_object: 796f0327cbb1372c1b2a817a7db39f8f185a59be + last_write_checksum: sha1:3c79fc7c43cd018fba4950ba013ed15899b82ebf + pristine_git_object: 0add960bc93f53df5ddda94892543a0857f32dd6 + src/mistralai/client/models/startfinetuningjobop.py: + id: "663886392468" + last_write_checksum: sha1:6a6a409dd481359e8d6593fa2ea817007f8a967d + pristine_git_object: 805a8721cc7d048f172e1096ead0e410c7d04928 src/mistralai/client/models/systemmessage.py: id: 500ef6e85ba1 - last_write_checksum: sha1:4ca4da49acae5fb508584b1776d368eba7d4a119 - pristine_git_object: 9e01bc57bd17a5ecf6be5fee3383bbb9e03a8ab5 + last_write_checksum: sha1:af68936119bf7c067aec5215e2654c56a5df1755 + pristine_git_object: 352eca76ad5051cc2c504c673a23e048642fe018 src/mistralai/client/models/systemmessagecontentchunks.py: id: 297e8905d5af - last_write_checksum: sha1:4581a28c592708bf51dbc75b28fe9f7bddde3c70 - pristine_git_object: 7a79737964b79e39b760ef833cce24e411f5aa90 + last_write_checksum: sha1:e5695ca0ebdb0f02f3a0c527015df154a0c52b7f + pristine_git_object: d480a219e935aaea91adc320de0003b562c0bbb5 src/mistralai/client/models/textchunk.py: id: 9c96fb86a9ab - last_write_checksum: sha1:8abd7cb3d8149458d95268eea8f18d5096e77fb0 - pristine_git_object: 4207ce7e46141aed94cf0f8726bb2433709101ca + last_write_checksum: sha1:4ad624afaf4d83d4e58f72bcbd15b9faecc703f3 + pristine_git_object: c0584234da572bb903894633b123b1dda29e7736 src/mistralai/client/models/thinkchunk.py: id: 294bfce193a4 - last_write_checksum: sha1:a6cd3efbf01dc0a72818675893594179addcfd12 - pristine_git_object: b1560806b88b733bf3b574c3e0d45e93df892548 + last_write_checksum: sha1:d9c779959ed82ae3de66e481536d80bcc2ed57a5 + pristine_git_object: a999f5d7b824325085ec980cfa07294919408538 src/mistralai/client/models/timestampgranularity.py: id: 68ddf8d702ea - last_write_checksum: sha1:68ea11a4e27f23b2fcc976d0a8eeb95f6f28ba85 - pristine_git_object: 5bda890f500228ba0c3dc234edf09906b88cb522 + last_write_checksum: sha1:64e7b198a75f026590e26758112651d31984076f + pristine_git_object: 8d3773752444db865c0e2629ad9eed66eb7f2bc6 src/mistralai/client/models/tool.py: id: 48b4f6f50fe9 - last_write_checksum: sha1:5f80f78858fb50e0688123f8dd1478eeb0e7c5af - pristine_git_object: 4b29f575a3604d83fd6b492c26327f36e6e5a681 + last_write_checksum: sha1:14e7b21a2857e2ca36830730a47d0eca476fb491 + pristine_git_object: a46d31f166618fd5b92b7e76ccb9190796af7cd2 src/mistralai/client/models/toolcall.py: id: fb34a1a3f3c2 - last_write_checksum: sha1:f4c5de640f5b942f180062388be187a910067a1b - pristine_git_object: 558b49bfaec7c306c093b97a4bbf722fe9f4b6b1 + last_write_checksum: sha1:15ed0a4611e8c310640ec4622af8019d2db93355 + pristine_git_object: 4a05bbd04a44446babda8419dcf4d4c93248fe41 src/mistralai/client/models/toolchoice.py: id: 14f7e4cc35b6 - last_write_checksum: sha1:f833d01b307437a83705b9b669b0d95eab4c01e0 - pristine_git_object: 2c7f6cbf6ebfbbdcce7d82b885b5e07b6b52d066 + last_write_checksum: 
sha1:358a6e88486b4d372c9041dd15c0206b119bbc32 + pristine_git_object: aa2016fb63290c63f9b8f3e18c552f6598f15c8f src/mistralai/client/models/toolchoiceenum.py: id: c7798801f860 - last_write_checksum: sha1:d958ef93b303539226fdab0fd46c8ea21d24cea2 - pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + last_write_checksum: sha1:5388b2a6fad842f8e4ae79e6257b4d14c122a6ff + pristine_git_object: d66c3d07058eb87bcc3eec10de99a616b5f6638a src/mistralai/client/models/toolexecutiondeltaevent.py: id: df8f17cf3e07 - last_write_checksum: sha1:96147badaad7eb961d224b29d9134dba8fc35f49 - pristine_git_object: 0268e6a0d9b3c25afe1022e61a630e926a50f135 + last_write_checksum: sha1:6ad6e219f3d7512c9fd382fb22471bfaa0fc9b09 + pristine_git_object: 384ec2407848f51434ca378ad7de965c584b163b src/mistralai/client/models/toolexecutiondoneevent.py: id: 514fdee7d99f - last_write_checksum: sha1:bc439993c647ba471b7f1581f72e094b99bd5c14 - pristine_git_object: 854baee98a119caf237ca0f39e4ddd7a36577771 + last_write_checksum: sha1:09ef4842c50419eda746f3361454c4df0c3c2466 + pristine_git_object: 56f28899b8b4161fcddfec0ed2610486fe6f8b06 src/mistralai/client/models/toolexecutionentry.py: id: 76db69eebe41 - last_write_checksum: sha1:4fb31b58961ce5f43233d91fb6efb89c624fab44 - pristine_git_object: 839709fb8ea63cc358de9f5e71180bf9e94cf5a5 + last_write_checksum: sha1:ff84f62c5264aa023f412956cf83604ecc4112a9 + pristine_git_object: 158cbf06a2acdd492ddb91ae8eaca4802da9f359 src/mistralai/client/models/toolexecutionstartedevent.py: id: 40fadb8e49a1 - last_write_checksum: sha1:d71ec6e61c1a881be8e02853f1ba450c36ec16e3 - pristine_git_object: 66438cfc33b171868f597ff3f80a82a40d1396f4 + last_write_checksum: sha1:5ba46ca1583e8245736a0ae81372025482a8504b + pristine_git_object: 1591866981ce1439fbce3736f028b15205d95810 src/mistralai/client/models/toolfilechunk.py: id: 26c8aadf416a - last_write_checksum: sha1:753db4dd27eea752066a04774094cba73aeb8ca0 - pristine_git_object: 62b5ffeda19a7fa614ccc5e390450f2452dd119d + last_write_checksum: sha1:1dd468876a2ff5ec8b15b6f4e6b8f812e640a29a + pristine_git_object: 6eebd562b1707b41b81e2fd0e267e4c8698551de src/mistralai/client/models/toolmessage.py: id: 15f1af161031 - last_write_checksum: sha1:58370491597186ddf08c8648f1e24abc9c852c26 - pristine_git_object: eae2d2aef69dc4134f42714d69625e7b6c43e8c9 + last_write_checksum: sha1:809936ebaeb4541f862aed6d26e1d1f5ff0ae58a + pristine_git_object: b3e8ffd9294bf6b0b46b26097abb87a5b96c9302 src/mistralai/client/models/toolreferencechunk.py: id: 822e9f3e70de - last_write_checksum: sha1:bf6b77aff4de13f4f374513e85785a1c6b17b87b - pristine_git_object: 882b1563a44cbc77256b6f44b1f41d602956d0b4 + last_write_checksum: sha1:f02c38c892580a6287156551e7964c601a239220 + pristine_git_object: 3c76c8c2dcc86d225c5218fa13cd43a693230fa8 src/mistralai/client/models/tooltypes.py: id: 86c3b54272fd - last_write_checksum: sha1:94cd31b4a170bde0983bc48e8c1148693c3d67e0 - pristine_git_object: abb26c258280a889d784e662b45ed486fc648817 + last_write_checksum: sha1:e90c15c1e645a5f207af0c7ac728cb0a521c6706 + pristine_git_object: e601c1967c42ef8d0c2eea98bc5c0ca722cde066 src/mistralai/client/models/trainingfile.py: id: 2edf9bce227d - last_write_checksum: sha1:12257eadce20511a4f3e3f3424e3bca112510f5f - pristine_git_object: 1d9763e0fd8e44f9b6e05254c5abb5a81fdf0b17 + last_write_checksum: sha1:668f05a3e3b883c2f54b1e541f1fb501605456b0 + pristine_git_object: 1f710ff81c046261ea497505d7216a1208c65d5b src/mistralai/client/models/transcriptionresponse.py: id: 60896dbc6345 - last_write_checksum: 
sha1:1f3066c34b7e76acc46ddb1e69869f3c62bfb841 - pristine_git_object: 24c0b92e424e91d40972c0826553a7d344a8f932 + last_write_checksum: sha1:3e5c20911697f5569029932fe0910da94feb2b06 + pristine_git_object: 786863ec331a4bdca18ac056f7447d11010d4320 src/mistralai/client/models/transcriptionsegmentchunk.py: id: d1e6f3bdc74b - last_write_checksum: sha1:23714fcd3791d09a7cc9a1bddd2f2203861d1bce - pristine_git_object: c89d84fcf3842da23e1f710309446b4c592ceeb3 + last_write_checksum: sha1:0107b6ee9160cd2a8309f7c8465502d7d0be90a8 + pristine_git_object: c78bec3068b95782bdc271c2e1ee645b115fed32 src/mistralai/client/models/transcriptionstreamdone.py: id: 066a9158ed09 - last_write_checksum: sha1:09bd7a12a1985d377883be53815f88195dcdce57 - pristine_git_object: add17f562385c3befc2932b16448901154372ca6 + last_write_checksum: sha1:3a6abc6f1a0ad78d73e32f3d40ef4bb425aee5b5 + pristine_git_object: b5740b3bb62b4db3846b7727b15e18502e39d862 src/mistralai/client/models/transcriptionstreamevents.py: id: b50b3d74f16f - last_write_checksum: sha1:651ae56098858fe8103ebd280bbdf2f74550794c - pristine_git_object: caaf943a4662ecccab96183f63c226eaefee2882 + last_write_checksum: sha1:f688a18317bd048ad89881c35cb80e39bb7cba47 + pristine_git_object: 17161a177721e44a40903cf15bf08ad0b56545de src/mistralai/client/models/transcriptionstreameventtypes.py: id: 6f71f6fbf4c5 - last_write_checksum: sha1:d7671637063c19222c20b8334abf92abe3d30517 - pristine_git_object: 4a910f0abca2912746cac60fd5a16bd5464f2457 + last_write_checksum: sha1:1d568460b1521f17dd5e551632ae4d7883a98dd3 + pristine_git_object: c74bbb7483cc3981ee3638c80c15924f3e1c20c4 src/mistralai/client/models/transcriptionstreamlanguage.py: id: e94333e4bc27 - last_write_checksum: sha1:7da587e67d635164bb986a3151a43b9a71b28c4d - pristine_git_object: b47024adfca2d8da6f1f01ce573bcc339cbbc63a + last_write_checksum: sha1:17c7b082ebf5764e21f124fe4c6a6ee5cea4fc51 + pristine_git_object: 67b3e9791efaf134580d82c2a12fab1cd33efbb1 src/mistralai/client/models/transcriptionstreamsegmentdelta.py: id: c0a882ce57e5 - last_write_checksum: sha1:91631a724a84abf4fd603ba7a7630b5e7d970944 - pristine_git_object: 7cfffb63f31d10a247e066c8f422e4f6af2cf489 + last_write_checksum: sha1:12cbfcf02d5cb4979a836e429690786153250bf0 + pristine_git_object: 8db5e73619eab98c3751689a7ec5bef45ef9ef6b src/mistralai/client/models/transcriptionstreamtextdelta.py: id: 6086dc081147 - last_write_checksum: sha1:1c065d9a2874c4b315fe3cd191f94ef3e8f1cc43 - pristine_git_object: ce279cf67ffc4e225ce37490f4ffd0c0d64fe993 + last_write_checksum: sha1:6b371b5d236e6e767f25160ab0e8a49bcaf356f8 + pristine_git_object: 49338a083332467e64f171637ca04365ca6bf25b src/mistralai/client/models/unarchiveftmodelout.py: id: 9dbc3bfb71ed - last_write_checksum: sha1:b2a1f9af7a5a7f5cbcda3256c46d02926e0cf2da - pristine_git_object: 511c390b4192cf85ec86150c7dad84543c68e031 + last_write_checksum: sha1:40a23dc39af81f06b23f21dad45c5c5f1178b2af + pristine_git_object: 0249a69e8552ed00a5e1f505fdc16025c46d2477 + src/mistralai/client/models/unarchivemodelop.py: + id: eb18584fd78c + last_write_checksum: sha1:5b81357950f301a82233b58a3e2a5b232fdbf546 + pristine_git_object: 1d68a06ae41559baffb6d54398b52dec630556c7 + src/mistralai/client/models/updateagentop.py: + id: ae3a6abea468 + last_write_checksum: sha1:3867948bd0ea37b597c4e5ef7a2e6881791a5fa5 + pristine_git_object: 28acc83d8df1373e897f9634dfbb84ee28897717 + src/mistralai/client/models/updateagentversionop.py: + id: 3821dca5b20a + last_write_checksum: sha1:4c41a450278858089c7cb23b8fcf1e4184fa1f1d + pristine_git_object: 
114013bcdcfb7d7c9e935285f167a004b65fbd09 + src/mistralai/client/models/updatedocumentop.py: + id: eee9ef317180 + last_write_checksum: sha1:7d9fc6e18e6631cfe9cd1bc2fa5f7d6cd599ec19 + pristine_git_object: 073f22a9a28c18ad645212262bdc66528a1f6281 src/mistralai/client/models/updateftmodelin.py: id: 39e2d678e651 - last_write_checksum: sha1:dd8dda798b804c4927505ac1fcbd13787f32a25d - pristine_git_object: 0471a15458f3cff4939360d3891af0fdee9ec251 + last_write_checksum: sha1:4ea30ed8eaad36e1440614016f075f088c7e5781 + pristine_git_object: 4ac5a8a24026f6a975044de01a9918364aa64e04 + src/mistralai/client/models/updatelibraryop.py: + id: 4ba7acdb62c6 + last_write_checksum: sha1:3816c8eff226634b545843eed2d0c15fa1579308 + pristine_git_object: c5a1ad30e9bfc277cbbcdea0218a265ad10bcb96 + src/mistralai/client/models/updatemodelop.py: + id: ba149ecfe03e + last_write_checksum: sha1:2ce33ac60846a5ef70141dccbdb09950c3d1e348 + pristine_git_object: 023be97905929aa2925f20cd69b3591e6e3168d7 + src/mistralai/client/models/updateorcreatelibraryaccessop.py: + id: ec9b15418f5c + last_write_checksum: sha1:82fe6bbbb1402f03b7c0380c5fd84a8fef9bec67 + pristine_git_object: 1abe6eda3eb7d0aff8a7c146c848a63e299cedf1 + src/mistralai/client/models/uploaddocumentop.py: + id: 0018fe7ff48c + last_write_checksum: sha1:f31d565f419cbcc59af0655753cee6c643ad307a + pristine_git_object: 2c957947830ae0d467084cc6502d9d97ffdf6c81 + src/mistralai/client/models/uploadfileop.py: + id: d67619670938 + last_write_checksum: sha1:00664ba8af70ffc96871eee02890411475ca6c37 + pristine_git_object: 50848f0b663f60f9a303010f3c940919939949c9 src/mistralai/client/models/uploadfileout.py: id: 42466f2bebfb - last_write_checksum: sha1:db43df223f848a25a1526624cd3722ef3014e700 - pristine_git_object: 55e56504db280fdb4772bb061128742866555e82 + last_write_checksum: sha1:44d0e5d419fb82c56c33c0f9af8902b3cc06bf6d + pristine_git_object: be291efb523965c155fc922d896da2cf682378ab src/mistralai/client/models/usageinfo.py: id: 54adb9a3af16 - last_write_checksum: sha1:a5f57f73d176aa8f4a9ad91daefe8e6257398abc - pristine_git_object: f1186d97357320f4bfc9d3f2a626f58d2b1a38d0 + last_write_checksum: sha1:fcfdc921bbcc78436ef156dd7a2eff1123c4036f + pristine_git_object: e78f92e75f86fd593469f7267aad72e417178161 src/mistralai/client/models/usermessage.py: id: cb583483acf4 - last_write_checksum: sha1:1e33aea6971835069dc9c862351d507f48d4ff8d - pristine_git_object: 8d92cea803368e996d68dc2f3a2dadd1d06a4675 + last_write_checksum: sha1:215406ca76602e899620ef763e216d71f8cd9fcd + pristine_git_object: 25ccdf805e9fbc65da7b6d0051f13224cf0e04fa src/mistralai/client/models/validationerror.py: id: 15df3c7368ab - last_write_checksum: sha1:de86af94be29bd8bfd5fa2708eeb3dda3032423d - pristine_git_object: 352409be88a1175073e5438d6da86fc9a54896fc + last_write_checksum: sha1:63df5739d68f984470d4d1b8661a875201cc301d + pristine_git_object: 385714c8cb80a8afbca6d5142a2d378d0d165cf9 src/mistralai/client/models/wandbintegration.py: id: 4823c1e80942 - last_write_checksum: sha1:b33912c4e08c07b0139cc3c31f93e899f797b5f2 - pristine_git_object: 89489fb4527c6515a609bcb533ef59ab516c7a38 + last_write_checksum: sha1:6391a293368ba6fa98114ce510a7665b47d82721 + pristine_git_object: c5db4a6d409f1d84d356a471995119a070db627a src/mistralai/client/models/wandbintegrationout.py: id: 6b103d74195c - last_write_checksum: sha1:f64af59d7fe3d068e185776b01d43b7fdab1f129 - pristine_git_object: a7f9afeb6683a173115371a686af5f95e2d29056 + last_write_checksum: sha1:37caaf5224b216826c48912538959baa0a7d997a + pristine_git_object: 
d0a09bf48c3a24f5382a626d26897afe2d680f7e src/mistralai/client/models/websearchpremiumtool.py: id: bfe88af887e3 - last_write_checksum: sha1:689087bc6c49bbc8b286e5b0155a6e5f6a1dc47d - pristine_git_object: 8d2d4b5dfea50a34ac744181790bf5db84809b1c + last_write_checksum: sha1:9f9b4bfeea780cec16b9457ee800524c3eba7a4b + pristine_git_object: 9588ab1d7361d3ab1cba2f16e74695273cc03557 src/mistralai/client/models/websearchtool.py: id: 26b0903423e5 - last_write_checksum: sha1:93015f750a125a8297f9455278ebe482794ba958 - pristine_git_object: ba4cc09f84faebb438a631db6ac328fea2ced609 + last_write_checksum: sha1:9afaf3738be10d0a401b34e15db25612ee33465f + pristine_git_object: 27502909ea608f8e0b4a71484da94d26209e0c07 src/mistralai/client/models_.py: id: 1d277958a843 - last_write_checksum: sha1:987921077f5b5535c39a21216585fc1bf9aa8811 - pristine_git_object: 5ef9da096e58023aaa582f31717b4ee7a4b720b0 + last_write_checksum: sha1:f50e7b7194f97de4abf0afd70b5e1c52b805cef6 + pristine_git_object: 05b33ac72da14401b700c4abfb28ca33b5af702b src/mistralai/client/ocr.py: id: 2f804a12fc62 - last_write_checksum: sha1:877f0c2db0319ea6b5ccf3d92f35bf633df10eda - pristine_git_object: ce7e2126dda2bc2b12cefb96e955edd3c7d4b6ab + last_write_checksum: sha1:2cfde7a27733502b87690c1025adefe5b717da57 + pristine_git_object: 2aa382295a9f1561021a36f3a68a9fb505cfe536 src/mistralai/client/py.typed: id: d95cd1565e33 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 src/mistralai/client/sdk.py: id: 48edbcb38d7e - last_write_checksum: sha1:831d2d1fee16c8d970c946f80ec56ba965e4f0ca - pristine_git_object: 9957940005a1150762e9fc284993cefeb2e8831a + last_write_checksum: sha1:be11dc3f70c773dd5c6baba6b3fafd996c5baec2 + pristine_git_object: b1ab54935a3421008c78f4864bd6097c0a098040 src/mistralai/client/sdkconfiguration.py: id: b7dd68a0235e - last_write_checksum: sha1:a24763668db44bf36ca35d1efa4873e2495dd716 - pristine_git_object: df50d16fa502e8b4c2a4567f3541fd48bfc1e324 + last_write_checksum: sha1:c6944f12c6fdc992d43db943b24c8c90854cde5e + pristine_git_object: 712e92e05c7fd3016431ec62ecb7b7789c8b7071 src/mistralai/client/transcriptions.py: id: 75b45780c978 - last_write_checksum: sha1:5c305412b646fa70232fd141e93378b3b4d4b3c4 - pristine_git_object: 455010243710d56d033861b1440cc1e30924d40c + last_write_checksum: sha1:b47a3765f2191715fc19bdbc4e56414abbe59f4b + pristine_git_object: f7ef5b0a0769467bd4bea61f7b0dca3b68c3788d src/mistralai/client/types/__init__.py: id: 000b943f821c - last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed - pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + last_write_checksum: sha1:12a4ace69cbc63f1125eeddf901afed7cdf378b0 + pristine_git_object: cf83864312d8fed0bb9dd3ce39d373b040c36b2e src/mistralai/client/types/basemodel.py: id: 7ec465a1d3ff - last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 - pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee + last_write_checksum: sha1:b62a9d42d79a238399e04efbf5c02215c707efde + pristine_git_object: 4e889aa0ffbb4402e416a40fa6259334cb0a3c5c src/mistralai/client/utils/__init__.py: id: b69505f4b269 - last_write_checksum: sha1:c7c1ee47be7ac3774b042c8aee439143493ed3ce - pristine_git_object: f9c2edce8ecf2d2a4ab0ad36129ac70afd3d1f2f + last_write_checksum: sha1:adb457b85659a04945857a74407306dafbdce7cb + pristine_git_object: 7ed3a42095b5921adf0e154ae6eba560a1098233 src/mistralai/client/utils/annotations.py: id: 1ffdedfc66a2 - last_write_checksum: 
sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc - pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + last_write_checksum: sha1:f86ba37de752e63076f25d53f9c54fce98d2a0bd + pristine_git_object: 4b60ab8e730e7093a064b6869c4a712b96e4aad8 src/mistralai/client/utils/datetimes.py: id: c40066d868c9 - last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 - pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + last_write_checksum: sha1:412ca432d6f5a75b692a967bc6fc52e4f4eff7d5 + pristine_git_object: a2c94fac73ecbfb8acd8ed4f75692318e4f863ec src/mistralai/client/utils/enums.py: id: a0735873b5ac - last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d - pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + last_write_checksum: sha1:fe05b6a21360b0eff1fc246e9a3ee01758521262 + pristine_git_object: d897495f053459106144501c67f2215251d52a27 src/mistralai/client/utils/eventstreaming.py: id: 3263d7502030 - last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b - pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + last_write_checksum: sha1:0e15051d74262fbe051e1ba83fd1f2c0c0a016a0 + pristine_git_object: 3fe3c7e13509d6fab08fbb8504c6c5f674c2b679 src/mistralai/client/utils/forms.py: id: 58842e905fce - last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 - pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + last_write_checksum: sha1:c7929d974f46629b56e740456ddf03230b4048ab + pristine_git_object: 2b474b9a719e95c4bcae8572e5569e64f8d0b77f src/mistralai/client/utils/headers.py: id: 9066de2ead8b - last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 - pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + last_write_checksum: sha1:bcd2f47b96bfaa54b3590c557a9267142d446be6 + pristine_git_object: 6491187230b5f11c7ff13396891ac69099a73a79 src/mistralai/client/utils/logger.py: id: 745023607a1f - last_write_checksum: sha1:3212454c3047548e8f9099366dc0e7c37e5918ac - pristine_git_object: 2ef27ee5bb8cd37d9aa66b076c449fd9c80e2627 + last_write_checksum: sha1:2582e0cb889b6293c12ce9671aba6281d46bad44 + pristine_git_object: 3edad8307ea0ef38e857596a3ec11023a4af287f src/mistralai/client/utils/metadata.py: id: d49d535ae52c - last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 - pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + last_write_checksum: sha1:54d300a665d3d5eafcc778a795d79347479b8337 + pristine_git_object: d46ffa59952926b7b1a842b0db2475527eda87df src/mistralai/client/utils/queryparams.py: id: bb77d4664844 - last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 - pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + last_write_checksum: sha1:d02ce5b2dcc26edb7c937d75b98b70c22a5af189 + pristine_git_object: 0b78c548233f32afa2aafe0040ebb120b51532e8 src/mistralai/client/utils/requestbodies.py: id: 946cfcd26ee4 - last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 - pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + last_write_checksum: sha1:8cac30839193ee0bb02975b0e225eab97adf4fd1 + pristine_git_object: 3aae69c7cf618776daec8bd46f9116b06c25e837 src/mistralai/client/utils/retries.py: id: 5f1a5b90423c - last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 - pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + last_write_checksum: sha1:94a86f31092553d4640a54c446cfe9028b4fb6ef + pristine_git_object: 90c008b0e20c1a539d65ffb387fb61a724c3c111 src/mistralai/client/utils/security.py: id: 1acb7c006265 - 
last_write_checksum: sha1:d86d2fd73cbb4a77f72395c10fff1c700efcf42e - pristine_git_object: 3b8526bfdf5c9c871ed184a2ec785f7bc1ebe57e + last_write_checksum: sha1:e8543609e699dab330a4768786883c6ca38f07a6 + pristine_git_object: 4c73806d9c8e54a2a4cfe8f62d8c281177789f6f src/mistralai/client/utils/serializers.py: id: 53c57c7f29a8 - last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 - pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + last_write_checksum: sha1:8a3a15cf273034261111f2559cacbb579e17cb1b + pristine_git_object: fbc2772dc4284775be92de6a086c1eade9376417 src/mistralai/client/utils/unmarshal_json_response.py: id: b13585fc5626 - last_write_checksum: sha1:4df16054b0c28b043d248dd8f56992574156bcd0 - pristine_git_object: 6d43d6e44056d64e272f60a466c47391a60c792d + last_write_checksum: sha1:c0c44d0a656477daa225724e88a7cf5c954a1df6 + pristine_git_object: 65190e5c1d70a31f51656e1644bb701b9f132bcd src/mistralai/client/utils/url.py: id: 3c6496c17510 - last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 - pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + last_write_checksum: sha1:c64be472d29cf229f2b91102808dcb741371c227 + pristine_git_object: 27a6a3a05287ff8a4e24e379ae5d20280c2caf30 src/mistralai/client/utils/values.py: id: bb6ade7a7f82 - last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 - pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 + last_write_checksum: sha1:da9ce43ad241db386efd9b2f53d81eb051dd7544 + pristine_git_object: 2469a9f310a37a7170b54853715274f13d38901c examples: list_models_v1_models_get: speakeasy-default-list-models-v1-models-get: @@ -3650,6 +3716,618 @@ examples: application/json: [{"alias": "", "version": 318290, "created_at": "2025-10-02T20:25:32.322Z", "updated_at": "2026-11-19T02:58:37.894Z"}] "422": application/json: {} + ListModels: + userExample: + responses: + "200": + application/json: {"object": "list"} + RetrieveModel: + speakeasy-default-retrieve-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "base"} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Principal Implementation Assistant", "root": "", "archived": false} + DeleteModel: + speakeasy-default-delete-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": 
"model", "deleted": true} + UpdateModel: + speakeasy-default-update-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "", "object": "model", "created": 76471, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "9765ed11-3bc9-49ff-a19d-06665406d404", "model_type": "completion"} + ArchiveModel: + speakeasy-default-archive-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": true} + UnarchiveModel: + speakeasy-default-unarchive-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": false} + StartConversation: + speakeasy-default-start-conversation: + requestBody: + application/json: {"inputs": "", "stream": false, "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + ListConversations: + speakeasy-default-list-conversations: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "object": "conversation", "id": "", "created_at": "2026-05-02T18:35:22.595Z", "updated_at": "2024-04-15T10:58:56.705Z", "model": "Silverado"}] + "422": + application/json: {} + GetConversation: + speakeasy-default-get-conversation: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2026-10-30T16:36:24.274Z", "updated_at": "2026-03-08T22:30:16.213Z", "agent_id": ""} + "422": + application/json: {} + DeleteConversation: + speakeasy-default-delete-conversation: + parameters: + path: + conversation_id: "" + responses: + "422": + application/json: {} + AppendConversation: + speakeasy-default-append-conversation: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": {"key": "", "key1": ""}}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + GetConversationHistory: + speakeasy-default-get-conversation-history: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.history", "conversation_id": "", "entries": []} + "422": + application/json: {} + GetConversationMessages: + speakeasy-default-get-conversation-messages: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []} + "422": + 
+  GetConversationMessages:
+    speakeasy-default-get-conversation-messages:
+      parameters:
+        path:
+          conversation_id: ""
+      responses:
+        "200":
+          application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []}
+        "422":
+          application/json: {}
+  RestartConversation:
+    speakeasy-default-restart-conversation:
+      parameters:
+        path:
+          conversation_id: ""
+      requestBody:
+        application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""}
+      responses:
+        "200":
+          application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}}
+        "422":
+          application/json: {}
+  StartConversationStream:
+    speakeasy-default-start-conversation-stream:
+      requestBody:
+        application/json: {"inputs": "", "stream": true, "completion_args": {"response_format": {"type": "text"}}}
+      responses:
+        "422":
+          application/json: {}
+  AppendConversationStream:
+    speakeasy-default-append-conversation-stream:
+      parameters:
+        path:
+          conversation_id: ""
+      requestBody:
+        application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}}
+      responses:
+        "422":
+          application/json: {}
+  RestartConversationStream:
+    speakeasy-default-restart-conversation-stream:
+      parameters:
+        path:
+          conversation_id: ""
+      requestBody:
+        application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""}
+      responses:
+        "422":
+          application/json: {}
+  CreateAgent:
+    speakeasy-default-create-agent:
+      requestBody:
+        application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Mustang", "name": ""}
+      responses:
+        "200":
+          application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Taurus", "name": "", "object": "agent", "id": "", "version": 388058, "versions": [980917, 959600], "created_at": "2024-07-23T17:25:11.997Z", "updated_at": "2025-07-14T09:13:03.268Z", "deployment_chat": false, "source": ""}
+        "422":
+          application/json: {}
+  ListAgents:
+    speakeasy-default-list-agents:
+      parameters:
+        query:
+          page: 0
+          page_size: 20
+      responses:
+        "200":
+          application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Challenger", "name": "", "object": "agent", "id": "", "version": 679172, "versions": [491437], "created_at": "2026-05-11T12:36:32.958Z", "updated_at": "2026-08-23T04:04:31.448Z", "deployment_chat": false, "source": ""}]
+        "422":
+          application/json: {}
+  GetAgent:
+    speakeasy-default-get-agent:
+      parameters:
+        path:
+          agent_id: ""
+      responses:
+        "200":
+          application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "F-150", "name": "", "object": "agent", "id": "", "version": 928666, "versions": [246402], "created_at": "2024-02-28T12:05:26.160Z", "updated_at": "2024-05-16T04:31:56.940Z", "deployment_chat": false, "source": ""}
+        "422":
+          application/json: {}
+  UpdateAgent:
+    speakeasy-default-update-agent:
+      parameters:
+        path:
+          agent_id: ""
+      requestBody:
+        application/json: {"completion_args": {"response_format": {"type": "text"}}}
+      responses:
+        "200":
+          application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "1", "name": "", "object": "agent", "id": "", "version": 388537, "versions": [955918, 365950, 823288], "created_at": "2026-11-04T08:06:14.896Z", "updated_at": "2025-05-23T04:44:27.181Z", "deployment_chat": true, "source": ""}
+        "422":
+          application/json: {}
+  DeleteAgent:
+    speakeasy-default-delete-agent:
+      parameters:
+        path:
+          agent_id: ""
+      responses:
+        "422":
+          application/json: {}
+  UpdateAgentVersion:
+    speakeasy-default-update-agent-version:
+      parameters:
+        path:
+          agent_id: ""
+        query:
+          version: 958693
+      responses:
+        "200":
+          application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "XTS", "name": "", "object": "agent", "id": "", "version": 203502, "versions": [449666], "created_at": "2024-09-21T15:29:30.503Z", "updated_at": "2026-10-29T17:49:52.509Z", "deployment_chat": true, "source": ""}
+        "422":
+          application/json: {}
+  ListAgentVersions:
+    speakeasy-default-list-agent-versions:
+      parameters:
+        path:
+          agent_id: ""
+        query:
+          page: 0
+          page_size: 20
+      responses:
+        "200":
+          application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Volt", "name": "", "object": "agent", "id": "", "version": 45747, "versions": [584697, 811109], "created_at": "2024-12-19T10:33:53.873Z", "updated_at": "2025-10-05T12:31:56.977Z", "deployment_chat": false, "source": ""}]
+        "422":
+          application/json: {}
+  GetAgentVersion:
+    speakeasy-default-get-agent-version:
+      parameters:
+        path:
+          agent_id: ""
+          version: ""
+      responses:
+        "200":
+          application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Camaro", "name": "", "object": "agent", "id": "", "version": 663020, "versions": [210212], "created_at": "2026-11-16T03:32:55.781Z", "updated_at": "2026-09-28T23:51:49.611Z", "deployment_chat": true, "source": ""}
+        "422":
+          application/json: {}
+  CreateOrUpdateAgentAlias:
+    speakeasy-default-create-or-update-agent-alias:
+      parameters:
+        path:
+          agent_id: ""
+        query:
+          alias: ""
+          version: 154719
+      responses:
+        "200":
+          application/json: {"alias": "", "version": 991981, "created_at": "2025-09-02T11:06:53.872Z", "updated_at": "2024-12-17T11:05:04.936Z"}
+        "422":
+          application/json: {}
+  ListAgentAliases:
+    speakeasy-default-list-agent-aliases:
+      parameters:
+        path:
+          agent_id: ""
+      responses:
+        "200":
+          application/json: [{"alias": "", "version": 345116, "created_at": "2025-03-19T21:46:52.564Z", "updated_at": "2026-07-18T22:23:53.218Z"}]
+        "422":
+          application/json: {}
+  DeleteAgentAlias:
+    speakeasy-default-delete-agent-alias:
+      parameters:
+        path:
+          agent_id: ""
+        query:
+          alias: ""
+      responses:
+        "422":
+          application/json: {}
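The alias fixtures above cover the new version-alias flow. Below is a sketch of how it might look from the SDK; the method names are assumptions inferred from the generated operation modules (`agents_api_v1_agents_create_or_update_aliasop`, `agents_api_v1_agents_list_version_aliasesop`), so check the generated README for the exact 2.0.0a3 spelling. The agent id and version are hypothetical.

```python
import os
from mistralai import Mistral

# Sketch of the agent version-alias operations covered by the
# CreateOrUpdateAgentAlias / ListAgentAliases fixtures above.
# Method names below are assumptions, not confirmed 2.0.0a3 API.
with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    # Pin the alias "prod" to a specific agent version.
    alias = client.beta.agents.create_version_alias(
        agent_id="ag_0123456789",  # hypothetical agent id
        alias="prod",
        version=3,                 # hypothetical version number
    )

    # Enumerate every alias defined for the agent.
    for entry in client.beta.agents.list_version_aliases(agent_id="ag_0123456789"):
        print(entry.alias, "->", entry.version)
```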
+ application/json: {"id": "a03c22a9-d4f2-4735-806c-b8497fe2a882", "name": "", "created_at": "2024-03-20T22:16:14.073Z", "updated_at": "2025-08-10T22:18:39.851Z", "owner_id": null, "owner_type": "", "total_size": 735078, "nb_documents": 443485, "chunk_size": 738927} + "422": + application/json: {} + UpdateLibrary: + speakeasy-default-update-library: + parameters: + path: + library_id: "27049553-3425-49ce-b965-fcb3a7ab03a3" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "0c44cb97-9c48-4e8b-9837-239b80130faf", "name": "", "created_at": "2025-02-22T01:07:38.404Z", "updated_at": "2024-01-02T09:35:39.994Z", "owner_id": "9ea3bb36-40f8-41f9-ba61-d6f71a725ff2", "owner_type": "", "total_size": 234996, "nb_documents": 664396, "chunk_size": 337104} + "422": + application/json: {} + ListDocuments: + speakeasy-default-list-documents: + parameters: + path: + library_id: "05e1bda5-99b1-4baf-bb03-905d8e094f74" + query: + page_size: 100 + page: 0 + sort_by: "created_at" + sort_order: "desc" + responses: + "200": + application/json: {"pagination": {"total_items": 985775, "total_pages": 196446, "current_page": 86746, "page_size": 671573, "has_more": false}, "data": [{"id": "9b168ce6-0e63-4d0a-b784-71cab0b43775", "library_id": "01d6c3ae-df9c-448d-8e84-873b6588d655", "hash": "", "mime_type": "", "extension": "shtml", "size": null, "name": "", "created_at": "2024-06-29T16:51:59.433Z", "processing_status": "", "uploaded_by_id": "ce40c587-9bb9-48d4-8bd3-5ce14f8f07c8", "uploaded_by_type": "", "tokens_processing_total": 288046}]} + "422": + application/json: {} + UploadDocument: + speakeasy-default-upload-document: + parameters: + path: + library_id: "f973c54e-979a-4464-9d36-8cc31beb21fe" + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "a13f4191-9721-413d-ac5c-b8edadbfb34e", "library_id": "a6ea3cdd-242f-4132-baf8-9a2589d78cb2", "hash": "", "mime_type": "", "extension": "mp4v", "size": 731796, "name": "", "created_at": "2024-04-30T08:38:55.667Z", "processing_status": "", "uploaded_by_id": "fd1426b3-90f8-4b54-97de-c4f108cb2a63", "uploaded_by_type": "", "tokens_processing_total": 603440} + "422": + application/json: {} + GetDocument: + speakeasy-default-get-document: + parameters: + path: + library_id: "f9902d0a-1ea4-4953-be48-52df6edd302a" + document_id: "c3e12fd9-e840-46f2-8d4a-79985ed36d24" + responses: + "200": + application/json: {"id": "52c93ba5-b31c-4717-a099-f3415e6d4eea", "library_id": "912f1e36-456c-4551-bd6d-535931a66817", "hash": "", "mime_type": "", "extension": "wav", "size": null, "name": "", "created_at": "2026-09-30T22:43:59.455Z", "processing_status": "", "uploaded_by_id": "8578215b-d0b4-4ee2-857d-dcb0686d45f1", "uploaded_by_type": "", "tokens_processing_total": 833979} + "422": + application/json: {} + UpdateDocument: + speakeasy-default-update-document: + parameters: + path: + library_id: "3b900c67-d2b6-4637-93f2-3eff2c85f8dd" + document_id: "66f935fd-37ec-441f-bca5-b1129befcbca" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "6a5ec2ab-bd54-4cc8-a761-e51374243293", "library_id": "f8b3b9a7-bb4b-4b47-b3b2-bb6db5e92901", "hash": "", "mime_type": "", "extension": "gif", "size": null, "name": "", "created_at": "2025-11-09T02:41:11.680Z", "processing_status": "", "uploaded_by_id": "0f707dfd-bd39-42ad-9748-c0b305a13eb6", "uploaded_by_type": "", "tokens_processing_total": 170388} + "422": + application/json: {} + DeleteDocument: + 
+  DeleteDocument:
+    speakeasy-default-delete-document:
+      parameters:
+        path:
+          library_id: "c728d742-7845-462b-84ad-2aacbaf1c7cf"
+          document_id: "ed3f5797-846a-4abe-8e30-39b2fd2323e0"
+      responses:
+        "422":
+          application/json: {}
+  GetDocumentTextContent:
+    speakeasy-default-get-document-text-content:
+      parameters:
+        path:
+          library_id: "12689dc1-50df-4a0d-8202-2757f7a8c141"
+          document_id: "9d4057e9-d112-437c-911e-6ee031389739"
+      responses:
+        "200":
+          application/json: {"text": ""}
+        "422":
+          application/json: {}
+  GetDocumentStatus:
+    speakeasy-default-get-document-status:
+      parameters:
+        path:
+          library_id: "41bb33c4-7e53-453d-bf21-398bb2862772"
+          document_id: "416b95cf-19c8-45af-84be-26aaa3ab3666"
+      responses:
+        "200":
+          application/json: {"document_id": "b5b43c40-8e91-41d9-933c-096ee588639a", "processing_status": ""}
+        "422":
+          application/json: {}
+  GetDocumentSignedUrl:
+    speakeasy-default-get-document-signed-url:
+      parameters:
+        path:
+          library_id: "2dbbe172-1374-41be-b03d-a088c733612e"
+          document_id: "b5d88764-47f1-4485-9df1-658775428344"
+      responses:
+        "200":
+          application/json: ""
+        "422":
+          application/json: {}
+  GetDocumentExtractedTextSignedUrl:
+    speakeasy-default-get-document-extracted-text-signed-url:
+      parameters:
+        path:
+          library_id: "46d040ce-ae2e-4891-a54c-cdab6a8f62d8"
+          document_id: "3eddbfe2-3fd7-47f5-984b-b378e6950e37"
+      responses:
+        "200":
+          application/json: ""
+        "422":
+          application/json: {}
+  ReprocessDocument:
+    speakeasy-default-reprocess-document:
+      parameters:
+        path:
+          library_id: "76d357e4-d891-40c6-9d1e-6d6ce5056ee0"
+          document_id: "09798d2b-8f46-46c6-9765-8054a82a4bb2"
+      responses:
+        "422":
+          application/json: {}
+  ListLibraryAccesses:
+    speakeasy-default-list-library-accesses:
+      parameters:
+        path:
+          library_id: "9eb628ef-f118-47eb-b3cc-9750c4ca5fb6"
+      responses:
+        "200":
+          application/json: {"data": [{"library_id": "98821ea0-f6e2-444d-b922-e649cd549a2a", "org_id": "a33230f8-b93d-4f45-80ce-b45e8dd8b5fe", "role": "", "share_with_type": "", "share_with_uuid": "0e1f6eb2-b59e-4e38-b916-382b3383c228"}]}
+        "422":
+          application/json: {}
+  UpdateOrCreateLibraryAccess:
+    speakeasy-default-update-or-create-library-access:
+      parameters:
+        path:
+          library_id: "88bb030c-1cb5-4231-ba13-742c56554876"
+      requestBody:
+        application/json: {"level": "Viewer", "share_with_uuid": "6a736283-c1fa-49b0-9b6d-ea9309c0a766", "share_with_type": "Workspace"}
+      responses:
+        "200":
+          application/json: {"library_id": "b783a30a-ca47-4c15-8095-dee3502846e5", "org_id": "6721ec8e-e0c0-4e8e-be83-3c01f2f884a5", "role": "", "share_with_type": "", "share_with_uuid": null}
+        "422":
+          application/json: {}
+  DeleteLibraryAccess:
+    speakeasy-default-delete-library-access:
+      parameters:
+        path:
+          library_id: "fc7ab1cf-e33c-4791-a6e0-95ff1f921c43"
+      requestBody:
+        application/json: {"share_with_uuid": "5818ddff-3568-40f1-a9e4-39d6cb9f5c94", "share_with_type": "Org"}
+      responses:
+        "200":
+          application/json: {"library_id": "6eeb1c0b-8c49-4745-8e3a-eef5bace0782", "org_id": "36550d6e-a514-4601-bd5b-7a0978aab0c7", "role": "", "share_with_type": "", "share_with_uuid": "023a9d84-8615-44a6-acd3-59b113a45c43"}
+        "422":
+          application/json: {}
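The UploadFile and ListFiles fixtures that follow exercise the files surface. A minimal sketch, assuming the established `client.files` calls carry over to 2.0.0a3; the local file name is hypothetical.

```python
import os
from mistralai import Mistral

# Sketch matching the UploadFile / ListFiles fixtures below.
with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    # UploadFile: multipart upload of a local JSONL file.
    with open("example.file.jsonl", "rb") as fh:
        uploaded = client.files.upload(
            file={"file_name": "example.file.jsonl", "content": fh},
            purpose="fine-tune",
        )

    # ListFiles: page through files, as in the fixture's query parameters.
    page = client.files.list(page=0, page_size=100)
    for f in page.data:
        print(f.id, f.filename, f.mimetype)
```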
"upload", "signature": "d4821d2de1917341"} + ListFiles: + userExample: + parameters: + query: + page: 0 + page_size: 100 + include_total: true + responses: + "200": + application/json: {"data": [{"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}, {"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}], "object": "list", "total": 2} + RetrieveFile: + userExample: + parameters: + path: + file_id: "654a62d9-b7ee-49ac-835e-af4153e3c9ec" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341", "deleted": false} + DeleteFile: + userExample: + parameters: + path: + file_id: "789c27a4-69de-47c6-b67f-cf6e56ce9f41" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "deleted": true} + DownloadFile: + speakeasy-default-download-file: + parameters: + path: + file_id: "e2ba278e-eac9-4050-ae8e-ec433e124efb" + responses: + "200": + application/octet-stream: "x-file: example.file" + GetFileSignedUrl: + userExample: + parameters: + path: + file_id: "7a0c108d-9e6b-4c47-990d-a20cba50b283" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://mistralaifilesapiprodswe.blob.core.windows.net/fine-tune/.../.../e85980c9409e4a46930436588f6292b0.jsonl?se=2025-10-04T14%3A16%3A17Z&sp=r&sv=2025-01-05&sr=b&sig=..."} + ListFineTuningJobs: + speakeasy-default-list-fine-tuning-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + responses: + "200": + application/json: {"object": "list", "total": 677316} + CreateFineTuningJob: + speakeasy-default-create-fine-tuning-job: + requestBody: + application/json: {"model": "Countach", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} + responses: + "200": + application/json: {"id": "18371b47-e157-4d80-8d09-2687df8868e8", "auto_start": false, "model": "Fiesta", "status": "FAILED", "created_at": 475667, "modified_at": 452225, "training_files": [], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + GetFineTuningJob: + speakeasy-default-get-fine-tuning-job: + parameters: + path: + job_id: "2855f873-414e-4cf5-a46e-e589e39ee809" + responses: + "200": + application/json: {"id": "b9f4ad32-1400-4751-8e0d-16c09b4b26e6", "auto_start": true, "model": "LeBaron", "status": "QUEUED", "created_at": 458966, "modified_at": 377090, "training_files": ["52d812c3-b5fe-4866-878e-39a5910f91df"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [], "weight": 6490.15, "loss_function": "single_class"}]} + CancelFineTuningJob: + speakeasy-default-cancel-fine-tuning-job: + parameters: + path: + job_id: "ee7d6f03-fcbb-43ca-8f17-0388c0832eb9" + responses: + "200": + application/json: {"id": "24b50383-3de5-4711-a14f-b71bbeccc6c5", "auto_start": true, "model": "Countach", "status": "CANCELLED", 
"created_at": 148194, "modified_at": 80833, "training_files": ["13ba2c85-5db5-4c14-94e4-2fcf030cecae", "85892e4f-5c84-4f38-bfb8-01072484489c", "723f89f0-65c0-43fa-9a9f-296acfe91134"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": ["", ""], "weight": 1717.09, "loss_function": "single_class"}]} + StartFineTuningJob: + speakeasy-default-start-fine-tuning-job: + parameters: + path: + job_id: "da371429-0ec2-4cea-b9c7-73ce3a1dd76f" + responses: + "200": + application/json: {"id": "2628c0c5-a98f-4d0b-a22a-fba0b0b23112", "auto_start": false, "model": "Model 3", "status": "QUEUED", "created_at": 139851, "modified_at": 571341, "training_files": ["856f394d-d216-41ab-8fa1-a42fba9e7734"], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + ListBatchJobs: + speakeasy-default-list-batch-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + order_by: "-created" + responses: + "200": + application/json: {"object": "list", "total": 853018} + CreateBatchJob: + speakeasy-default-create-batch-job: + requestBody: + application/json: {"endpoint": "/v1/classifications", "model": "mistral-small-latest", "timeout_hours": 24} + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["936962bc-f885-485f-914e-fe90c1d312f9", "02e65e71-6f9f-4c39-9a54-bba0acb1e912", "4c5d848d-d86e-43cb-a795-1eaba0c96608"], "endpoint": "", "errors": [], "status": "SUCCESS", "created_at": 346291, "total_requests": 784915, "completed_requests": 663597, "succeeded_requests": 195848, "failed_requests": 688098} + GetBatchJob: + speakeasy-default-get-batch-job: + parameters: + path: + job_id: "358c80a1-79bd-43f0-8f0e-8186713aa3ba" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["782a7fa0-6ea1-4be9-bce9-9ff61f81530d"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 878725, "total_requests": 913781, "completed_requests": 964506, "succeeded_requests": 119373, "failed_requests": 490093} + CancelBatchJob: + speakeasy-default-cancel-batch-job: + parameters: + path: + job_id: "393537d7-8b33-4931-a289-7f61f8757eda" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["7309e534-200e-43a4-83c5-dc4c2a14c745"], "endpoint": "", "errors": [], "status": "FAILED", "created_at": 157212, "total_requests": 188914, "completed_requests": 685483, "succeeded_requests": 127060, "failed_requests": 428561} examplesVersion: 1.0.2 generatedTests: {} generatedFiles: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 20576b9d..23b915b5 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -22,13 +22,14 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" - persistentEdits: {} + persistentEdits: + enabled: "true" tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a2 + version: 2.0.0a3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock b/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock deleted file mode 100644 index d6937e41..00000000 --- a/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock +++ /dev/null @@ -1,799 +0,0 @@ -src/mistralai/client/_hooks/sdkhooks.py -docs/models/messageoutputeventcontent.md -docs/models/classificationresponse.md 
-docs/models/tooltypes.md -docs/models/toolexecutionstartedevent.md -docs/models/unarchiveftmodeloutobject.md -src/mistralai/client/models/conversationrequest.py -docs/models/agentconversationobject.md -src/mistralai/client/models/listlibraryout.py -docs/models/agentsapiv1agentsupdaterequest.md -src/mistralai/client/models/agentaliasresponse.py -docs/models/embeddingresponse.md -docs/models/agentsapiv1agentsgetversionrequest.md -src/mistralai/client/models/libraryin.py -docs/models/agentscompletionstreamrequest.md -docs/models/function.md -docs/models/agentsapiv1agentsgetagentversion.md -docs/models/imagegenerationtool.md -docs/models/classifiertargetin.md -src/mistralai/client/models/realtimetranscriptionsession.py -src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py -docs/models/agentsapiv1conversationsgetrequest.md -docs/models/messageoutputentry.md -docs/models/classificationrequestinputs.md -docs/models/chatcompletionrequestmessage.md -docs/models/thinking.md -src/mistralai/client/models/conversationinputs.py -docs/models/functionresultentry.md -docs/models/fimcompletionstreamrequeststop.md -docs/models/librariesupdatev1request.md -src/mistralai/client/models/agents_api_v1_conversations_appendop.py -docs/models/paginationinfo.md -docs/models/agenthandoffentry.md -docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md -docs/models/moderationresponse.md -docs/models/toolexecutionentryobject.md -docs/models/completionresponsestreamchoicefinishreason.md -docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md -docs/models/modelcapabilities.md -docs/models/responseformats.md -docs/models/agentupdaterequest.md -src/mistralai/client/models/transcriptionstreamsegmentdelta.py -docs/models/sharingin.md -docs/models/responseformat.md -docs/models/imageurl.md -src/mistralai/client/models/processingstatusout.py -docs/models/messageoutputevent.md -src/mistralai/client/models/conversationusageinfo.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py -src/mistralai/client/models/agents_api_v1_agents_get_versionop.py -src/mistralai/client/models/libraries_documents_get_v1op.py -docs/models/attributes.md -docs/models/agentscompletionrequeststop.md -src/mistralai/client/models/moderationresponse.py -src/mistralai/client/models/classifiertrainingparametersin.py -docs/models/audiochunk.md -src/mistralai/client/models/ocrrequest.py -src/mistralai/client/models/file.py -src/mistralai/client/models/ocrresponse.py -src/mistralai/client/models/classifiertargetin.py -docs/models/agentconversationagentversion.md -docs/models/classificationtargetresult.md -docs/models/tableformat.md -docs/models/classifiertrainingparameters.md -src/mistralai/client/models/shareenum.py -.vscode/settings.json -docs/models/messageoutputentrycontent.md -py.typed -docs/models/agentscompletionrequest.md -docs/models/completionjoboutrepository.md -src/mistralai/client/models/batchrequest.py -docs/models/entry.md -src/mistralai/client/models/modelcapabilities.py -docs/models/file.md -src/mistralai/client/models/mistralpromptmode.py -scripts/publish.sh -docs/models/agentscompletionstreamrequestmessage.md -docs/models/messageinputentrytype.md -src/mistralai/client/__init__.py -src/mistralai/client/_version.py -src/mistralai/client/models/ocrpageobject.py -docs/models/ocrimageobject.md -src/mistralai/client/basesdk.py -docs/models/ocrpagedimensions.md -src/mistralai/client/httpclient.py -docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md 
-src/mistralai/client/py.typed -src/mistralai/client/types/__init__.py -docs/models/agentsapiv1agentsupdateversionrequest.md -src/mistralai/client/types/basemodel.py -src/mistralai/client/utils/__init__.py -src/mistralai/client/utils/annotations.py -src/mistralai/client/utils/datetimes.py -src/mistralai/client/utils/enums.py -src/mistralai/client/models/inputs.py -src/mistralai/client/utils/eventstreaming.py -src/mistralai/client/utils/forms.py -src/mistralai/client/utils/headers.py -src/mistralai/client/models/legacyjobmetadataout.py -src/mistralai/client/utils/logger.py -src/mistralai/client/utils/metadata.py -src/mistralai/client/utils/queryparams.py -src/mistralai/client/utils/requestbodies.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py -docs/models/toolexecutionentryname.md -src/mistralai/client/utils/retries.py -src/mistralai/client/utils/security.py -src/mistralai/client/models/toolfilechunk.py -src/mistralai/client/utils/serializers.py -src/mistralai/client/models/transcriptionstreamdone.py -src/mistralai/client/utils/unmarshal_json_response.py -src/mistralai/client/utils/url.py -src/mistralai/client/utils/values.py -src/mistralai/client/models/responsevalidationerror.py -src/mistralai/client/models/retrievefileout.py -src/mistralai/client/models/mistralerror.py -docs/models/apiendpoint.md -src/mistralai/client/models/sdkerror.py -docs/models/jobsout.md -src/mistralai/client/models/no_response_error.py -docs/models/conversationrestartstreamrequesthandoffexecution.md -docs/models/functiontool.md -docs/models/agentsapiv1conversationsappendstreamrequest.md -docs/models/agenthandoffentryobject.md -docs/models/transcriptionstreameventtypes.md -docs/models/messageoutputeventrole.md -src/mistralai/client/models/modellist.py -docs/models/responseretrievemodelv1modelsmodelidget.md -docs/models/referencechunktype.md -docs/models/chatclassificationrequest.md -src/mistralai/client/models/responseformats.py -docs/models/librariesdocumentsdeletev1request.md -src/mistralai/client/models/conversationresponse.py -src/mistralai/client/models/completionargsstop.py -src/mistralai/client/models/contentchunk.py -docs/models/classifierdetailedjoboutstatus.md -docs/models/listlibraryout.md -docs/models/transcriptionstreamevents.md -src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py -docs/models/chatcompletionrequeststop.md -src/mistralai/client/models/libraries_update_v1op.py -src/mistralai/client/models/websearchtool.py -src/mistralai/client/models/classifiertrainingparameters.py -docs/models/validationerror.md -src/mistralai/client/models/documentlibrarytool.py -src/mistralai/client/models/responsestartedevent.py -docs/models/document.md -src/mistralai/client/models/filesignedurl.py -src/mistralai/client/models/fimcompletionresponse.py -docs/models/agentscompletionstreamrequeststop.md -docs/models/agenthandoffentrytype.md -docs/models/conversationmessages.md -src/mistralai/client/models/responsedoneevent.py -docs/models/completionresponsestreamchoice.md -docs/models/fimcompletionresponse.md -src/mistralai/client/models/unarchiveftmodelout.py -src/mistralai/client/conversations.py -src/mistralai/client/models/toolexecutionstartedevent.py -src/mistralai/client/models/jsonschema.py -docs/models/completionftmodelout.md -src/mistralai/client/models/fimcompletionstreamrequest.py -docs/models/chatcompletionrequesttoolchoice.md -src/mistralai/client/models/tooltypes.py -src/mistralai/client/models/functionname.py 
-docs/models/functionresultentryobject.md -docs/models/classifierjobout.md -src/mistralai/client/models/listfilesout.py -src/mistralai/client/models/agents_api_v1_agents_listop.py -src/mistralai/client/models/imageurl.py -src/mistralai/client/models/chatcompletionchoice.py -src/mistralai/client/sdk.py -docs/models/conversationrequesttool.md -docs/models/chatcompletionrequest.md -docs/models/librariesdeletev1request.md -src/mistralai/client/models/chatcompletionresponse.py -docs/models/toolreferencechunktool.md -src/mistralai/client/_hooks/types.py -src/mistralai/client/models/agents_api_v1_conversations_deleteop.py -docs/models/systemmessagecontentchunks.md -src/mistralai/client/models/sharingin.py -docs/models/completionjoboutobject.md -docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md -src/mistralai/client/models/functionresultentry.py -docs/models/agentsapiv1conversationsdeleterequest.md -docs/models/githubrepositoryout.md -docs/models/retrievemodelv1modelsmodelidgetrequest.md -docs/models/conversationstreamrequest.md -docs/models/agentsapiv1conversationsmessagesrequest.md -docs/models/sharingout.md -docs/models/archiveftmodelout.md -docs/models/listdocumentout.md -docs/models/toolreferencechunk.md -docs/models/instructrequestinputs.md -src/mistralai/client/models/deltamessage.py -src/mistralai/client/models/tool.py -src/mistralai/client/beta_agents.py -src/mistralai/client/models/toolcall.py -docs/models/jobin.md -src/mistralai/client/models/libraries_documents_upload_v1op.py -src/mistralai/client/models/toolexecutiondoneevent.py -docs/models/conversationrequestagentversion.md -docs/models/listsharingout.md -docs/models/completiondetailedjoboutrepository.md -docs/models/completionftmodeloutobject.md -src/mistralai/client/models/agentcreationrequest.py -docs/models/functioncallentry.md -src/mistralai/client/models/agents_api_v1_conversations_getop.py -src/mistralai/client/models/filepurpose.py -src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py -src/mistralai/client/models/jobsout.py -docs/models/agentsapiv1conversationsappendrequest.md -docs/models/jobsapiroutesbatchgetbatchjobsrequest.md -src/mistralai/client/models/audiotranscriptionrequest.py -src/mistralai/client/models/agents_api_v1_agents_update_versionop.py -src/mistralai/client/models/prediction.py -docs/models/conversationinputs.md -docs/models/agenthandoffdoneevent.md -docs/models/finetuneablemodeltype.md -src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py -docs/models/conversationrestartrequest.md -src/mistralai/client/models/ocrimageobject.py -docs/models/security.md -src/mistralai/client/models/libraryinupdate.py -docs/models/fimcompletionrequest.md -docs/models/ocrusageinfo.md -docs/models/completionjoboutintegration.md -src/mistralai/client/libraries.py -src/mistralai/client/models/wandbintegration.py -src/mistralai/client/models/ocrpagedimensions.py -src/mistralai/client/models/jobin.py -docs/models/conversationrestartstreamrequestagentversion.md -src/mistralai/client/models/libraries_documents_reprocess_v1op.py -docs/models/agentsapiv1agentsgetrequest.md -src/mistralai/client/models/paginationinfo.py -src/mistralai/client/models/jobmetadataout.py -docs/models/assistantmessage.md -src/mistralai/client/models/conversationappendstreamrequest.py -docs/models/librariesdocumentsgettextcontentv1request.md -docs/models/realtimetranscriptionerror.md 
-src/mistralai/client/models/completiondetailedjobout.py -src/mistralai/client/fine_tuning_jobs.py -src/mistralai/client/models/documentout.py -docs/models/librariesgetv1request.md -docs/models/referencechunk.md -src/mistralai/client/models/completiontrainingparameters.py -src/mistralai/client/agents.py -src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py -src/mistralai/client/models/toolchoice.py -docs/models/requestsource.md -docs/models/embeddingrequestinputs.md -src/mistralai/client/models/imagegenerationtool.py -src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py -docs/models/jobsoutobject.md -docs/models/librariesdocumentsreprocessv1request.md -src/mistralai/client/models/audiotranscriptionrequeststream.py -docs/models/tool.md -src/mistralai/client/models/uploadfileout.py -src/mistralai/client/models/timestampgranularity.py -src/mistralai/client/models/metricout.py -docs/models/jobmetadataout.md -src/mistralai/client/models/files_api_routes_upload_fileop.py -docs/models/chatmoderationrequestinputs1.md -src/mistralai/client/models/transcriptionstreameventtypes.py -src/mistralai/client/models/completionchunk.py -src/mistralai/client/models/conversationevents.py -docs/models/agent.md -src/mistralai/client/models/documenttextcontent.py -docs/models/embeddingresponsedata.md -docs/models/codeinterpretertool.md -src/mistralai/client/models/deletemodelout.py -docs/models/agenttool.md -src/mistralai/client/models/completionresponsestreamchoice.py -src/mistralai/client/models/audiochunk.py -docs/models/functioncallevent.md -docs/models/transcriptionstreamtextdelta.md -docs/models/completiontrainingparametersin.md -docs/models/conversationappendrequesthandoffexecution.md -docs/models/chatcompletionchoicefinishreason.md -src/mistralai/client/models/libraries_documents_get_status_v1op.py -docs/models/libraryinupdate.md -src/mistralai/client/models/modelconversation.py -docs/models/completiondetailedjobout.md -docs/models/realtimetranscriptionsessioncreated.md -docs/models/classifierjoboutobject.md -docs/models/filesapiroutesretrievefilerequest.md -src/mistralai/client/models/trainingfile.py -docs/models/multipartbodyparams.md -src/mistralai/client/models/libraries_delete_v1op.py -docs/models/sampletype.md -src/mistralai/client/models/functioncallevent.py -src/mistralai/client/models/imageurlchunk.py -src/mistralai/client/models/libraries_documents_delete_v1op.py -src/mistralai/client/models/agentconversation.py -src/mistralai/client/models/chatclassificationrequest.py -docs/models/ftmodelcapabilitiesout.md -docs/models/classifierftmodelout.md -docs/models/deletemodelv1modelsmodeliddeleterequest.md -docs/models/messageoutputentryrole.md -docs/models/eventout.md -docs/models/systemmessage.md -src/mistralai/client/models/sampletype.py -docs/models/conversationevents.md -docs/models/fileschema.md -src/mistralai/client/models/agentscompletionrequest.py -src/mistralai/client/models/chatmoderationrequest.py -src/mistralai/client/models/classifierftmodelout.py -docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md -docs/models/chatcompletionresponse.md -src/mistralai/client/models/toolmessage.py -src/mistralai/client/accesses.py -src/mistralai/client/models/source.py -docs/models/documenturlchunk.md -docs/models/updateftmodelin.md -src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py -docs/models/toolreferencechunktype.md -src/mistralai/client/models/files_api_routes_get_signed_urlop.py 
-src/mistralai/client/models/responseerrorevent.py -docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md -docs/models/thinkchunk.md -docs/models/agentcreationrequesttool.md -docs/models/completiondetailedjoboutobject.md -src/mistralai/client/models/filechunk.py -docs/models/agentcreationrequest.md -docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md -docs/models/utils/retryconfig.md -docs/models/loc.md -docs/models/filesignedurl.md -src/mistralai/client/models/embeddingdtype.py -docs/models/chatcompletionstreamrequest.md -docs/models/audioformat.md -docs/models/transcriptionstreamsegmentdelta.md -docs/models/inputsmessage.md -docs/models/instructrequest.md -src/mistralai/client/models/batchjobout.py -docs/models/classifiertargetout.md -docs/models/filesapiroutesgetsignedurlrequest.md -docs/models/conversationappendrequest.md -docs/models/legacyjobmetadataoutobject.md -src/mistralai/client/models/messageoutputentry.py -docs/models/messageinputentryobject.md -src/mistralai/client/models/embeddingresponse.py -src/mistralai/client/models/documenturlchunk.py -docs/models/usermessage.md -src/mistralai/client/models/apiendpoint.py -src/mistralai/client/models/batchjobstatus.py -docs/models/jobsapiroutesbatchgetbatchjobrequest.md -docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md -docs/models/wandbintegration.md -docs/models/conversationmessagesobject.md -docs/models/utils/retryconfig.md -docs/models/fimcompletionstreamrequest.md -docs/models/batchrequest.md -docs/models/agentsapiv1conversationslistresponse.md -docs/models/conversationhistory.md -docs/sdks/agents/README.md -docs/models/transcriptionresponse.md -src/mistralai/client/models/files_api_routes_download_fileop.py -src/mistralai/client/models/embeddingrequest.py -src/mistralai/client/models/transcriptionresponse.py -src/mistralai/client/models/libraries_documents_list_v1op.py -src/mistralai/client/models/githubrepositoryin.py -docs/models/librariesdocumentsgetstatusv1request.md -docs/models/modelconversationtool.md -.gitattributes -docs/models/functioncallentryarguments.md -src/mistralai/client/models/ftclassifierlossfunction.py -src/mistralai/client/batch.py -docs/models/classificationrequest.md -src/mistralai/client/models/realtimetranscriptionerrordetail.py -docs/models/hyperparameters.md -docs/models/utils/retryconfig.md -docs/models/moderationobject.md -docs/models/classifierjoboutstatus.md -docs/models/agentupdaterequesttool.md -docs/models/chatcompletionstreamrequestmessage.md -docs/models/completiondetailedjoboutintegration.md -src/mistralai/client/models/transcriptionstreamtextdelta.py -src/mistralai/client/models/libraries_get_v1op.py -docs/models/agentscompletionrequesttoolchoice.md -src/mistralai/client/models/deletefileout.py -docs/models/completionevent.md -src/mistralai/client/chat.py -src/mistralai/client/models/completiontrainingparametersin.py -docs/models/librariesdocumentsupdatev1request.md -docs/models/instructrequestmessage.md -src/mistralai/client/models/documentupdatein.py -docs/models/toolfilechunk.md -src/mistralai/client/models/messageinputcontentchunks.py -src/mistralai/client/models/files_api_routes_delete_fileop.py -docs/models/utils/retryconfig.md -docs/models/assistantmessagerole.md -docs/sdks/transcriptions/README.md -docs/models/librariessharedeletev1request.md -src/mistralai/client/models/moderationobject.py -docs/models/unarchiveftmodelout.md -src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py -docs/models/messageoutputentrytype.md 
-docs/models/functioncall.md -docs/models/toolexecutiondeltaevent.md -src/mistralai/client/models/realtimetranscriptionerror.py -docs/models/agentsapiv1agentslistrequest.md -src/mistralai/client/models/websearchpremiumtool.py -src/mistralai/client/models/realtimetranscriptionsessionupdated.py -src/mistralai/client/models/libraries_documents_get_text_content_v1op.py -docs/models/agentscompletionstreamrequesttoolchoice.md -docs/models/textchunk.md -docs/models/toolcall.md -docs/models/assistantmessagecontent.md -src/mistralai/client/models/chatcompletionrequest.py -src/mistralai/client/models/usermessage.py -docs/models/outputcontentchunks.md -docs/models/librariesdocumentsuploadv1request.md -docs/models/entitytype.md -src/mistralai/client/models/basemodelcard.py -docs/models/toolexecutionentrytype.md -docs/models/shareenum.md -docs/models/imageurlunion.md -docs/models/conversationappendstreamrequest.md -docs/models/websearchpremiumtool.md -docs/models/utils/retryconfig.md -docs/models/fimcompletionrequeststop.md -src/mistralai/client/models/classificationtargetresult.py -src/mistralai/client/audio.py -docs/models/chatmoderationrequestinputs3.md -docs/models/response.md -src/mistralai/client/models/referencechunk.py -docs/models/jobinrepository.md -src/mistralai/client/models/files_api_routes_retrieve_fileop.py -src/mistralai/client/sdkconfiguration.py -src/mistralai/client/models/agents_api_v1_conversations_messagesop.py -src/mistralai/client/models/instructrequest.py -src/mistralai/client/models/classifiertargetout.py -docs/models/classifierdetailedjoboutobject.md -src/mistralai/client/models/inputentries.py -src/mistralai/client/models/toolchoiceenum.py -docs/models/chatcompletionstreamrequesttoolchoice.md -docs/models/agentconversation.md -docs/models/utils/retryconfig.md -src/mistralai/client/models/functioncall.py -docs/models/mistralpromptmode.md -docs/models/conversationresponseobject.md -src/mistralai/client/models/ocrtableobject.py -src/mistralai/client/models/toolexecutionentry.py -docs/models/classifierdetailedjobout.md -docs/models/conversationresponse.md -docs/models/agentsapiv1agentslistversionaliasesrequest.md -docs/models/conversationeventsdata.md -src/mistralai/client/models/ocrusageinfo.py -src/mistralai/client/models/ftmodelcard.py -src/mistralai/client/models/libraries_share_list_v1op.py -docs/models/modellistdata.md -docs/models/messageoutputcontentchunks.md -docs/models/modelconversation.md -docs/models/batchjobstatus.md -docs/models/encodingformat.md -docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md -docs/models/utils/retryconfig.md -docs/models/modellist.md -docs/models/textchunktype.md -docs/models/completionargs.md -docs/models/agenthandoffstartedevent.md -docs/models/basemodelcard.md -src/mistralai/client/models/classifierjobout.py -docs/models/batchjobout.md -docs/models/conversationstreamrequestagentversion.md -docs/models/filesapiroutesdownloadfilerequest.md -src/mistralai/client/models/fileschema.py -docs/models/completiontrainingparameters.md -docs/models/wandbintegrationout.md -docs/models/agentobject.md -src/mistralai/client/models/classifierdetailedjobout.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py -USAGE.md -docs/models/deltamessage.md -docs/models/messageinputentry.md -docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md -docs/models/filechunk.md -src/mistralai/client/models/agent.py -src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py 
-src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py -docs/models/classifierftmodeloutobject.md -src/mistralai/client/models/ftmodelcapabilitiesout.py -src/mistralai/client/models/listsharingout.py -src/mistralai/client/models/systemmessagecontentchunks.py -src/mistralai/client/models/agents_api_v1_agents_updateop.py -docs/models/retrievefileout.md -src/mistralai/client/models/agents_api_v1_conversations_historyop.py -src/mistralai/client/fim.py -docs/models/embeddingdtype.md -src/mistralai/client/models/conversationrestartstreamrequest.py -src/mistralai/client/models/completionargs.py -docs/models/toolexecutionstartedeventname.md -src/mistralai/client/models/transcriptionstreamlanguage.py -docs/models/librariessharelistv1request.md -src/mistralai/client/fine_tuning.py -docs/models/agentsapiv1conversationsrestartrequest.md -docs/models/conversationrestartstreamrequest.md -docs/models/transcriptionstreamlanguage.md -docs/models/toolexecutiondoneeventname.md -docs/models/classifierjoboutintegration.md -docs/models/classifiertrainingparametersin.md -src/mistralai/client/models/agentupdaterequest.py -docs/models/agentscompletionrequestmessage.md -docs/models/chatmoderationrequest.md -docs/models/chatcompletionchoice.md -docs/models/batchjoboutobject.md -docs/models/toolchoiceenum.md -docs/models/ocrrequest.md -src/mistralai/client/models/updateftmodelin.py -docs/models/classifierdetailedjoboutintegration.md -src/mistralai/client/models/agenthandoffdoneevent.py -src/mistralai/client/models/files_api_routes_list_filesop.py -src/mistralai/client/ocr.py -docs/models/embeddingrequest.md -src/mistralai/client/models/conversationstreamrequest.py -src/mistralai/client/models/thinkchunk.py -docs/models/toolchoice.md -docs/models/documentupload.md -docs/models/imageurlchunktype.md -docs/models/conversationrestartrequestagentversion.md -docs/models/transcriptionstreamdone.md -src/mistralai/client/models/libraryout.py -src/mistralai/client/models/conversationappendrequest.py -src/mistralai/client/models/audioformat.py -docs/models/conversationhistoryobject.md -docs/models/ftclassifierlossfunction.md -docs/models/websearchtool.md -docs/models/messageoutputentryobject.md -src/mistralai/client/models/batchjobsout.py -docs/models/agentsapiv1agentsdeleterequest.md -docs/models/libraryout.md -docs/models/batchjobsoutobject.md -docs/models/functionresultentrytype.md -src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py -docs/models/completionjoboutstatus.md -docs/models/documenttextcontent.md -docs/models/legacyjobmetadataout.md -docs/models/prediction.md -src/mistralai/client/models_.py -src/mistralai/client/models/sharingdelete.py -src/mistralai/client/models/usageinfo.py -docs/models/thinkchunktype.md -docs/models/agentsapiv1conversationshistoryrequest.md -src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py -src/mistralai/client/models/responseformat.py -docs/models/builtinconnectors.md -docs/models/realtimetranscriptionsession.md -docs/models/documentlibrarytool.md -docs/models/toolfilechunktool.md -docs/models/messageinputcontentchunks.md -src/mistralai/client/models/checkpointout.py -src/mistralai/client/models/validationerror.py -docs/models/utils/retryconfig.md -docs/models/chatmoderationrequestinputs2.md -docs/models/format_.md -docs/sdks/batchjobs/README.md -docs/models/agentsapiv1conversationsrestartstreamrequest.md 
-docs/models/filesapiroutesdeletefilerequest.md -docs/models/toolexecutionentry.md -docs/models/metricout.md -src/mistralai/client/models/completionjobout.py -docs/models/deletefileout.md -docs/models/functioncallentrytype.md -docs/models/filepurpose.md -docs/models/transcriptionsegmentchunk.md -docs/models/usageinfo.md -docs/models/responsev1conversationsget.md -src/mistralai/client/models/ssetypes.py -src/mistralai/client/models/audioencoding.py -docs/models/librariesdocumentsgetsignedurlv1request.md -docs/models/timestampgranularity.md -docs/models/conversationrequesthandoffexecution.md -src/mistralai/client/transcriptions.py -src/mistralai/client/models/function.py -src/mistralai/client/models/toolexecutiondeltaevent.py -docs/models/conversationappendstreamrequesthandoffexecution.md -docs/models/realtimetranscriptionerrordetail.md -docs/models/toolexecutiondeltaeventname.md -src/mistralai/client/models/__init__.py -src/mistralai/client/models/codeinterpretertool.py -docs/models/utils/retryconfig.md -docs/models/completiondetailedjoboutstatus.md -docs/models/librariesdocumentsgetv1request.md -src/mistralai/client/models/messageoutputevent.py -src/mistralai/client/models/agentscompletionstreamrequest.py -src/mistralai/client/models/textchunk.py -docs/models/conversationstreamrequesttool.md -docs/models/systemmessagecontent.md -docs/models/agentsapiv1conversationslistrequest.md -docs/models/chatcompletionstreamrequeststop.md -docs/models/responseerrorevent.md -docs/models/usermessagecontent.md -docs/models/audioencoding.md -docs/models/messageinputentryrole.md -docs/models/inputentries.md -src/mistralai/client/models/agents_api_v1_conversations_restartop.py -src/mistralai/client/models/messageentries.py -docs/models/ocrpageobject.md -src/mistralai/client/models/completionevent.py -src/mistralai/client/models/batchjobin.py -src/mistralai/client/models/requestsource.py -src/mistralai/client/models/fimcompletionrequest.py -docs/models/utils/retryconfig.md -src/mistralai/client/models/sharingout.py -docs/models/messageentries.md -docs/models/jobsoutdata.md -src/mistralai/client/batch_jobs.py -src/mistralai/client/models/messageinputentry.py -docs/models/uploadfileout.md -src/mistralai/client/models/finetuneablemodeltype.py -docs/models/documentupdatein.md -docs/models/toolmessagecontent.md -docs/models/utils/retryconfig.md -docs/models/documentout.md -docs/models/functionname.md -src/mistralai/client/documents.py -src/mistralai/client/models/realtimetranscriptionsessioncreated.py -docs/models/conversationstreamrequesthandoffexecution.md -docs/models/ocrresponse.md -src/mistralai/client/models/libraries_share_create_v1op.py -docs/models/functioncallentryobject.md -docs/models/httpvalidationerror.md -src/mistralai/client/models/agents_api_v1_agents_getop.py -docs/models/responsedoneevent.md -docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md -docs/models/utils/retryconfig.md -src/mistralai/client/models/completionftmodelout.py -docs/models/utils/retryconfig.md -src/mistralai/client/files.py -docs/models/batchjobsout.md -docs/models/audiotranscriptionrequeststream.md -src/mistralai/client/models/functioncallentryarguments.py -docs/models/responsestartedevent.md -src/mistralai/client/models/agents_api_v1_agents_deleteop.py -docs/models/utils/retryconfig.md -docs/models/completionchunk.md -src/mistralai/client/models/agents_api_v1_conversations_listop.py -src/mistralai/client/models/archiveftmodelout.py -docs/models/agentaliasresponse.md 
-docs/models/realtimetranscriptionsessionupdated.md -docs/models/batcherror.md -docs/models/contentchunk.md -docs/models/source.md -docs/models/utils/retryconfig.md -docs/models/toolexecutiondoneevent.md -docs/models/realtimetranscriptionerrordetailmessage.md -docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md -docs/models/sharingdelete.md -docs/models/agentsapiv1agentscreateorupdatealiasrequest.md -docs/models/completionjobout.md -docs/models/conversationrequest.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/betaagents/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/conversations/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/libraries/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/accesses/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/documents/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/chat/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/classifiers/README.md -docs/models/utils/retryconfig.md -docs/sdks/embeddings/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/files/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/fim/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/finetuningjobs/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/models/README.md -docs/models/utils/retryconfig.md -docs/sdks/ocr/README.md -docs/models/processingstatusout.md -docs/models/completionargsstop.md -docs/models/ocrtableobject.md -src/mistralai/client/models/assistantmessage.py -src/mistralai/client/models/libraries_documents_update_v1op.py -src/mistralai/client/models/agenthandoffstartedevent.py -src/mistralai/client/models/eventout.py -src/mistralai/client/models/toolreferencechunk.py -docs/models/githubrepositoryin.md -src/mistralai/client/models/messageoutputcontentchunks.py -src/mistralai/client/models/agenthandoffentry.py -docs/models/jsonschema.md -docs/models/conversationrestartrequesthandoffexecution.md -docs/models/listfilesout.md 
-src/mistralai/client/models/transcriptionstreamevents.py -docs/models/ftmodelcard.md -docs/models/jobinintegration.md -src/mistralai/client/models/conversationrestartrequest.py -src/mistralai/client/models/encodingformat.py -docs/models/deltamessagecontent.md -src/mistralai/client/models/outputcontentchunks.py -docs/models/toolfilechunktype.md -src/mistralai/client/_hooks/__init__.py -src/mistralai/client/models/entitytype.py -docs/models/deletemodelout.md -src/mistralai/client/embeddings.py -docs/models/documenturlchunktype.md -docs/models/batchjobin.md -src/mistralai/client/models/wandbintegrationout.py -docs/models/transcriptionstreameventsdata.md -src/mistralai/client/models/classificationresponse.py -docs/models/trainingfile.md -src/mistralai/client/models/transcriptionsegmentchunk.py -docs/models/audiotranscriptionrequest.md -src/mistralai/client/models/githubrepositoryout.py -src/mistralai/client/models/functiontool.py -docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md -docs/models/conversationusageinfo.md -docs/models/ssetypes.md -src/mistralai/client/models/listdocumentout.py -docs/models/libraryin.md -src/mistralai/client/models/libraries_share_delete_v1op.py -src/mistralai/client/models/systemmessage.py -src/mistralai/client/models/chatcompletionstreamrequest.py -src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py -docs/models/filesapirouteslistfilesrequest.md -docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md -src/mistralai/client/models/security.py -docs/models/modelconversationobject.md -src/mistralai/client/models/conversationmessages.py -docs/models/output.md -src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py -src/mistralai/client/models/classificationrequest.py -docs/models/librariesdocumentslistv1request.md -docs/models/toolmessage.md -docs/models/agentsapiv1agentslistversionsrequest.md -src/mistralai/client/models/embeddingresponsedata.py -src/mistralai/client/models/conversationhistory.py -docs/models/librariessharecreatev1request.md -docs/models/messageinputentrycontent.md -src/mistralai/client/models/functioncallentry.py -src/mistralai/client/models/builtinconnectors.py -src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py -src/mistralai/client/models/httpvalidationerror.py -src/mistralai/client/classifiers.py -docs/models/transcriptionsegmentchunktype.md -docs/models/arguments.md -docs/models/checkpointout.md -src/mistralai/client/beta.py -docs/models/archiveftmodeloutobject.md -docs/models/jobsapiroutesbatchcancelbatchjobrequest.md -docs/models/imageurlchunk.md -src/mistralai/client/models/batcherror.py -docs/models/inputs.md diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index a0e535c2..a9e18489 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,57 +2,57 @@ speakeasyVersion: 1.685.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:544a7fd4d099e72a9a12681b326d44201f1b163e4df2f5fd643d831167255d84 - sourceBlobDigest: sha256:41c72401329a30983907c32a60063da8ccd82137cf79d7f452089b5b83bb9d92 + sourceRevisionDigest: sha256:d303e640ad565cc8a9801519b20dc7eab226efdfdab951c11256962d9e479f74 + sourceBlobDigest: sha256:6e4c789de61b2c9c604bf581e0abbadae90e360491d95ec4247678f4f70cee87 tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:e3dd3079347edf744151936aaee4ec0ce3eeeb8f46b5c7f31f8e224221e879d4 - sourceBlobDigest: 
sha256:7a525230930debff23fec4e92e3ad2a57889ea46de86cc96d519615709ae8a16 + sourceRevisionDigest: sha256:351c4d392b8b2220c337a207e98ed5665ed27fd85de854871a70c4bc2b9c0784 + sourceBlobDigest: sha256:d79b21f70efb93b0cd261d2044939a288beaf8707a7caae86aca5c4d5de3821b tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 - sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 + sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 + sourceBlobDigest: sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 tags: - latest targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:544a7fd4d099e72a9a12681b326d44201f1b163e4df2f5fd643d831167255d84 - sourceBlobDigest: sha256:41c72401329a30983907c32a60063da8ccd82137cf79d7f452089b5b83bb9d92 + sourceRevisionDigest: sha256:d303e640ad565cc8a9801519b20dc7eab226efdfdab951c11256962d9e479f74 + sourceBlobDigest: sha256:6e4c789de61b2c9c604bf581e0abbadae90e360491d95ec4247678f4f70cee87 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:57821a9bf6cfe7001dfcbcaa2f17b233b98c2f79e2d7588540c41750f10b9c05 + codeSamplesRevisionDigest: sha256:0109302b87fa17b0103ef1e372fae76356811b3c552103e659bd5373d537d759 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:e3dd3079347edf744151936aaee4ec0ce3eeeb8f46b5c7f31f8e224221e879d4 - sourceBlobDigest: sha256:7a525230930debff23fec4e92e3ad2a57889ea46de86cc96d519615709ae8a16 + sourceRevisionDigest: sha256:351c4d392b8b2220c337a207e98ed5665ed27fd85de854871a70c4bc2b9c0784 + sourceBlobDigest: sha256:d79b21f70efb93b0cd261d2044939a288beaf8707a7caae86aca5c4d5de3821b codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:404d5964361b3ced085b11e4b8408c36a4a92efe12a97f7497919efdf7594f6f + codeSamplesRevisionDigest: sha256:09bb7cbf291076170d228116db05d1c9606af541b301b6564609c4d76633258a mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 - sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 + sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 + sourceBlobDigest: sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:debd698577e8da014e900a57194128d867ad76fd0d2e2b361e9d0c298700fc67 + codeSamplesRevisionDigest: sha256:f37fb6188ad25957bef4cadaa03f454a4f9ab0c045db633a46d9cc89af145ba2 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:v2 mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:v2 mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: 
registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:v2 targets: mistralai-azure-sdk: target: python diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 8557ab42..b45d6b3b 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -3,13 +3,13 @@ speakeasyVersion: 1.685.0 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:v2 mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:v2 mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:v2 targets: mistralai-azure-sdk: target: python diff --git a/MIGRATION.md b/MIGRATION.md index 5fb16739..9f39cdb5 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -49,6 +49,19 @@ Some type names have been updated for clarity and consistency: | `HandoffExecution` | `ConversationRequestHandoffExecution` | | `AgentVersion` | `ConversationRequestAgentVersion` | +### Shorter Request/Response Class Names + +Internal request and response wrapper classes now use concise names: + +| Old Name | New Name | +|---|---| +| `JobsAPIRoutesFineTuningArchiveFineTunedModelRequest` | `ArchiveModelRequest` | +| `JobsAPIRoutesFineTuningCreateFineTuningJobResponse` | `CreateFineTuningJobResponse` | +| `FilesAPIRoutesUploadFileRequest` | `UploadFileRequest` | +| `AgentsAPIV1ConversationsAppendRequest` | `AppendConversationRequest` | + +This affects all operation-specific request/response types. Core models like `UserMessage`, `ChatCompletionRequest`, etc. are unchanged. + Enums now accept unknown values for forward compatibility with API changes. --- diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..a169d78f --- /dev/null +++ b/Makefile @@ -0,0 +1,24 @@ +.PHONY: help test-generate update-speakeasy-version + +help: + @echo "Available targets:" + @echo " make test-generate Test SDK generation locally" + @echo " make update-speakeasy-version VERSION=x.y.z Update Speakeasy CLI version" + @echo "" + @echo "Note: Production SDK generation is done via GitHub Actions:" + @echo " .github/workflows/sdk_generation_mistralai_sdk.yaml" + +# Test SDK generation locally. +# For production, use GitHub Actions: .github/workflows/sdk_generation_mistralai_sdk.yaml +# This uses the Speakeasy CLI version defined in .speakeasy/workflow.yaml +test-generate: + speakeasy run --skip-versioning + +# Update the Speakeasy CLI version (the code generator tool). +# This modifies speakeasyVersion in .speakeasy/workflow.yaml and regenerates the SDK. +# Usage: make update-speakeasy-version VERSION=1.685.0 +update-speakeasy-version: +ifndef VERSION + $(error VERSION is required. This is the Speakeasy CLI version (e.g., 1.685.0)) +endif + uv run inv update-speakeasy --version "$(VERSION)" --targets "all" diff --git a/README.md b/README.md index 04cb586c..53de43f5 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,9 @@ $ source ~/.zshenv ## Summary -Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. +Mistral AI API: Dora OpenAPI schema + +Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. @@ -485,6 +487,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [get_version](docs/sdks/betaagents/README.md#get_version) - Retrieve a specific version of an agent. * [create_version_alias](docs/sdks/betaagents/README.md#create_version_alias) - Create or update an agent version alias. * [list_version_aliases](docs/sdks/betaagents/README.md#list_version_aliases) - List all aliases for an agent. +* [delete_version_alias](docs/sdks/betaagents/README.md#delete_version_alias) - Delete an agent version alias. ### [Beta.Conversations](docs/sdks/conversations/README.md) @@ -602,14 +605,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs=[ - { - "object": "entry", - "type": "function.result", - "tool_call_id": "", - "result": "", - }, - ], stream=True, completion_args={ + res = mistral.beta.conversations.start_stream(inputs="", stream=True, completion_args={ "response_format": { "type": "text", }, @@ -646,7 +642,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ + res = mistral.beta.libraries.documents.upload(library_id="f973c54e-979a-4464-9d36-8cc31beb21fe", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) @@ -762,7 +758,7 @@ with Mistral( **Inherit from [`MistralError`](./src/mistralai/client/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/client/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 52 of 74 methods.* +* [`HTTPValidationError`](./src/mistralai/client/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* * [`ResponseValidationError`](./src/mistralai/client/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. 
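To make the class-rename table added to MIGRATION.md above concrete, here is a minimal, hedged sketch of how an import site would change. The class names come straight from that table; the `mistralai.models` import path is an assumption based on the file layout touched by this patch, not a confirmed public path.

```python
# Hedged sketch only: class names are taken from the MIGRATION.md rename table in
# this patch; the import path is an assumption, not a confirmed public API.

# Old, route-derived wrapper names (renamed by this change):
# from mistralai.models import JobsAPIRoutesFineTuningArchiveFineTunedModelRequest

# New, concise equivalents per the migration table:
from mistralai.models import (
    AppendConversationRequest,
    ArchiveModelRequest,
    UploadFileRequest,
)
```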
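The README listing above also adds the agent version alias operations (`create_version_alias`, `list_version_aliases`, `delete_version_alias`). A minimal, hedged usage sketch follows: the client setup mirrors the README snippets in this patch, while the keyword arguments are assumptions inferred from the `DeleteAgentAliasRequest` and `GetAgentRequest` field tables further down, not confirmed signatures.

```python
# Hedged sketch of the new alias endpoints. Method paths come from the README
# listing in this patch; keyword argument names (agent_id, alias, version) are
# assumptions inferred from the documented request models, not confirmed signatures.
import os

from mistralai import Mistral

with Mistral(api_key=os.getenv("MISTRAL_API_KEY", "")) as mistral:
    # Point the "production" alias at a specific agent version
    # (the `version` parameter name is assumed, not shown in this patch).
    mistral.beta.agents.create_version_alias(
        agent_id="ag_123", alias="production", version=2
    )

    # Enumerate all aliases currently defined for the agent.
    aliases = mistral.beta.agents.list_version_aliases(agent_id="ag_123")

    # Remove an alias; DeleteAgentAliasRequest documents `agent_id` and `alias`.
    mistral.beta.agents.delete_version_alias(agent_id="ag_123", alias="production")
```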
diff --git a/docs/models/agent.md b/docs/models/agent.md index bd143350..e335d889 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -20,4 +20,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `deployment_chat` | *bool* | :heavy_check_mark: | N/A | -| `source` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `source` | *str* | :heavy_check_mark: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md index 6a24c00b..f0f0fdbc 100644 --- a/docs/models/agentcreationrequest.md +++ b/docs/models/agentcreationrequest.md @@ -12,4 +12,5 @@ | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md deleted file mode 100644 index ceffe009..00000000 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# AgentsAPIV1AgentsGetRequest - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md index b276e199..b1830d7b 100644 --- a/docs/models/agentupdaterequest.md +++ b/docs/models/agentupdaterequest.md @@ -13,4 +13,5 @@ | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsappendrequest.md b/docs/models/appendconversationrequest.md similarity index 96% rename from docs/models/agentsapiv1conversationsappendrequest.md rename to docs/models/appendconversationrequest.md index ac8a00ec..977d8e8b 100644 --- a/docs/models/agentsapiv1conversationsappendrequest.md +++ b/docs/models/appendconversationrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsAppendRequest +# AppendConversationRequest 
## Fields diff --git a/docs/models/agentsapiv1conversationsappendstreamrequest.md b/docs/models/appendconversationstreamrequest.md similarity index 96% rename from docs/models/agentsapiv1conversationsappendstreamrequest.md rename to docs/models/appendconversationstreamrequest.md index dbc330f1..a23231c2 100644 --- a/docs/models/agentsapiv1conversationsappendstreamrequest.md +++ b/docs/models/appendconversationstreamrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsAppendStreamRequest +# AppendConversationStreamRequest ## Fields diff --git a/docs/models/archiveftmodelout.md b/docs/models/archiveftmodelout.md index 46a9e755..98fa7b19 100644 --- a/docs/models/archiveftmodelout.md +++ b/docs/models/archiveftmodelout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ArchiveFTModelOutObject]](../models/archiveftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/archiveftmodeloutobject.md b/docs/models/archiveftmodeloutobject.md deleted file mode 100644 index f6f46889..00000000 --- a/docs/models/archiveftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ArchiveFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md b/docs/models/archivemodelrequest.md similarity index 93% rename from docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md rename to docs/models/archivemodelrequest.md index f9700df5..806d135e 100644 --- a/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md +++ b/docs/models/archivemodelrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningArchiveFineTunedModelRequest +# ArchiveModelRequest ## Fields diff --git a/docs/models/batchjobout.md b/docs/models/batchjobout.md index cb49649b..5f101173 100644 --- a/docs/models/batchjobout.md +++ b/docs/models/batchjobout.md @@ -3,24 +3,24 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.BatchJobOutObject]](../models/batchjoboutobject.md) | :heavy_minus_sign: | N/A | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `endpoint` | *str* | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | 
-| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | -| `outputs` | List[Dict[str, *Any*]] | :heavy_minus_sign: | N/A | -| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | -| `created_at` | *int* | :heavy_check_mark: | N/A | -| `total_requests` | *int* | :heavy_check_mark: | N/A | -| `completed_requests` | *int* | :heavy_check_mark: | N/A | -| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | -| `failed_requests` | *int* | :heavy_check_mark: | N/A | -| `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["batch"]]* | :heavy_minus_sign: | N/A | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `endpoint` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | +| `outputs` | List[Dict[str, *Any*]] | :heavy_minus_sign: | N/A | +| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `total_requests` | *int* | :heavy_check_mark: | N/A | +| `completed_requests` | *int* | :heavy_check_mark: | N/A | +| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | +| `failed_requests` | *int* | :heavy_check_mark: | N/A | +| `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjoboutobject.md b/docs/models/batchjoboutobject.md deleted file mode 100644 index 64ae8965..00000000 --- a/docs/models/batchjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# BatchJobOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `BATCH` | batch | \ No newline at end of file diff --git a/docs/models/batchjobsout.md b/docs/models/batchjobsout.md index a76cfdcc..7a9d6f68 100644 --- a/docs/models/batchjobsout.md +++ b/docs/models/batchjobsout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.BatchJobsOutObject]](../models/batchjobsoutobject.md) | :heavy_minus_sign: | N/A | 
-| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobsoutobject.md b/docs/models/batchjobsoutobject.md deleted file mode 100644 index d4bf9f65..00000000 --- a/docs/models/batchjobsoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# BatchJobsOutObject - - -## Values - -| Name | Value | -| ------ | ------ | -| `LIST` | list | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md b/docs/models/cancelbatchjobrequest.md similarity index 86% rename from docs/models/jobsapiroutesbatchcancelbatchjobrequest.md rename to docs/models/cancelbatchjobrequest.md index c19d0241..f31f843b 100644 --- a/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md +++ b/docs/models/cancelbatchjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesBatchCancelBatchJobRequest +# CancelBatchJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md b/docs/models/cancelfinetuningjobrequest.md similarity index 88% rename from docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md rename to docs/models/cancelfinetuningjobrequest.md index 883cbac6..6525788c 100644 --- a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md +++ b/docs/models/cancelfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningCancelFineTuningJobRequest +# CancelFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md b/docs/models/cancelfinetuningjobresponse.md similarity index 83% rename from docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md rename to docs/models/cancelfinetuningjobresponse.md index e0d2e361..c512342e 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md +++ b/docs/models/cancelfinetuningjobresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningGetFineTuningJobResponse +# CancelFineTuningJobResponse OK diff --git a/docs/models/classifierdetailedjobout.md b/docs/models/classifierdetailedjobout.md index 15f70aeb..fb532449 100644 --- a/docs/models/classifierdetailedjobout.md +++ b/docs/models/classifierdetailedjobout.md @@ -7,13 +7,13 @@ | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| +| `model` | *str* | :heavy_check_mark: | N/A | | `status` | [models.ClassifierDetailedJobOutStatus](../models/classifierdetailedjoboutstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.ClassifierDetailedJobOutObject]](../models/classifierdetailedjoboutobject.md) | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `integrations` | List[[models.ClassifierDetailedJobOutIntegration](../models/classifierdetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | diff --git a/docs/models/classifierdetailedjoboutobject.md b/docs/models/classifierdetailedjoboutobject.md deleted file mode 100644 index 08cbcffc..00000000 --- a/docs/models/classifierdetailedjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierDetailedJobOutObject - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md index d7bcd3ca..6e7afbbe 100644 --- a/docs/models/classifierftmodelout.md +++ b/docs/models/classifierftmodelout.md @@ -3,21 +3,21 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | -| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| 
`owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | +| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierftmodeloutobject.md b/docs/models/classifierftmodeloutobject.md deleted file mode 100644 index 9fe05bcf..00000000 --- a/docs/models/classifierftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/classifierjobout.md b/docs/models/classifierjobout.md index f8259cab..ceecef5d 100644 --- a/docs/models/classifierjobout.md +++ b/docs/models/classifierjobout.md @@ -7,13 +7,13 @@ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | N/A | | `status` | [models.ClassifierJobOutStatus](../models/classifierjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | -| `object` | [Optional[models.ClassifierJobOutObject]](../models/classifierjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. 
| +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | | `integrations` | List[[models.ClassifierJobOutIntegration](../models/classifierjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | diff --git a/docs/models/classifierjoboutobject.md b/docs/models/classifierjoboutobject.md deleted file mode 100644 index 1b42d547..00000000 --- a/docs/models/classifierjoboutobject.md +++ /dev/null @@ -1,10 +0,0 @@ -# ClassifierJobOutObject - -The object type of the fine-tuning job. - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completiondetailedjobout.md index 725ebcde..bc7e5d1c 100644 --- a/docs/models/completiondetailedjobout.md +++ b/docs/models/completiondetailedjobout.md @@ -7,13 +7,13 @@ | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| +| `model` | *str* | :heavy_check_mark: | N/A | | `status` | [models.CompletionDetailedJobOutStatus](../models/completiondetailedjoboutstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.CompletionDetailedJobOutObject]](../models/completiondetailedjoboutobject.md) | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `integrations` | List[[models.CompletionDetailedJobOutIntegration](../models/completiondetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | diff --git a/docs/models/completiondetailedjoboutobject.md b/docs/models/completiondetailedjoboutobject.md deleted file mode 100644 index 1bec88e5..00000000 --- a/docs/models/completiondetailedjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionDetailedJobOutObject - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md index 9ebfa83e..ccd4844f 100644 --- a/docs/models/completionftmodelout.md +++ b/docs/models/completionftmodelout.md @@ -3,20 +3,20 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | 
:heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionftmodeloutobject.md b/docs/models/completionftmodeloutobject.md deleted file mode 100644 index 6f9d858c..00000000 --- a/docs/models/completionftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/completionjobout.md b/docs/models/completionjobout.md index 84be452f..5eb44eef 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionjobout.md @@ -7,13 +7,13 @@ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | N/A | | `status` | [models.CompletionJobOutStatus](../models/completionjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | -| `object` | [Optional[models.CompletionJobOutObject]](../models/completionjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. 
| | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | | `integrations` | List[[models.CompletionJobOutIntegration](../models/completionjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | diff --git a/docs/models/completionjoboutobject.md b/docs/models/completionjoboutobject.md deleted file mode 100644 index 712b107d..00000000 --- a/docs/models/completionjoboutobject.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionJobOutObject - -The object type of the fine-tuning job. - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/createfinetuningjobresponse.md similarity index 80% rename from docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md rename to docs/models/createfinetuningjobresponse.md index 13191e90..f82cd793 100644 --- a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +++ b/docs/models/createfinetuningjobresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningCreateFineTuningJobResponse +# CreateFineTuningJobResponse OK diff --git a/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md b/docs/models/createorupdateagentaliasrequest.md similarity index 90% rename from docs/models/agentsapiv1agentscreateorupdatealiasrequest.md rename to docs/models/createorupdateagentaliasrequest.md index 79406434..af2591eb 100644 --- a/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md +++ b/docs/models/createorupdateagentaliasrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsCreateOrUpdateAliasRequest +# CreateOrUpdateAgentAliasRequest ## Fields diff --git a/docs/models/deleteagentaliasrequest.md b/docs/models/deleteagentaliasrequest.md new file mode 100644 index 00000000..17812ec4 --- /dev/null +++ b/docs/models/deleteagentaliasrequest.md @@ -0,0 +1,9 @@ +# DeleteAgentAliasRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsdeleterequest.md b/docs/models/deleteagentrequest.md similarity index 89% rename from docs/models/agentsapiv1agentsdeleterequest.md rename to docs/models/deleteagentrequest.md index 2799f418..0aaacae4 100644 --- a/docs/models/agentsapiv1agentsdeleterequest.md +++ b/docs/models/deleteagentrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsDeleteRequest +# DeleteAgentRequest ## Fields diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/deleteconversationrequest.md similarity index 95% rename from docs/models/agentsapiv1conversationsgetrequest.md rename to docs/models/deleteconversationrequest.md index 67d450c8..39d9e5df 100644 --- a/docs/models/agentsapiv1conversationsgetrequest.md +++ b/docs/models/deleteconversationrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsGetRequest +# DeleteConversationRequest ## Fields diff --git a/docs/models/librariesdocumentsgetv1request.md b/docs/models/deletedocumentrequest.md similarity index 91% rename from docs/models/librariesdocumentsgetv1request.md rename to 
docs/models/deletedocumentrequest.md index 6febc058..eb060099 100644 --- a/docs/models/librariesdocumentsgetv1request.md +++ b/docs/models/deletedocumentrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsGetV1Request +# DeleteDocumentRequest ## Fields diff --git a/docs/models/filesapiroutesdeletefilerequest.md b/docs/models/deletefilerequest.md similarity index 88% rename from docs/models/filesapiroutesdeletefilerequest.md rename to docs/models/deletefilerequest.md index 1b02c2db..bceae901 100644 --- a/docs/models/filesapiroutesdeletefilerequest.md +++ b/docs/models/deletefilerequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesDeleteFileRequest +# DeleteFileRequest ## Fields diff --git a/docs/models/librariessharedeletev1request.md b/docs/models/deletelibraryaccessrequest.md similarity index 96% rename from docs/models/librariessharedeletev1request.md rename to docs/models/deletelibraryaccessrequest.md index 850e22ab..c7034b98 100644 --- a/docs/models/librariessharedeletev1request.md +++ b/docs/models/deletelibraryaccessrequest.md @@ -1,4 +1,4 @@ -# LibrariesShareDeleteV1Request +# DeleteLibraryAccessRequest ## Fields diff --git a/docs/models/librariesgetv1request.md b/docs/models/deletelibraryrequest.md similarity index 91% rename from docs/models/librariesgetv1request.md rename to docs/models/deletelibraryrequest.md index 6e1e04c3..c229ad73 100644 --- a/docs/models/librariesgetv1request.md +++ b/docs/models/deletelibraryrequest.md @@ -1,4 +1,4 @@ -# LibrariesGetV1Request +# DeleteLibraryRequest ## Fields diff --git a/docs/models/deletemodelv1modelsmodeliddeleterequest.md b/docs/models/deletemodelrequest.md similarity index 94% rename from docs/models/deletemodelv1modelsmodeliddeleterequest.md rename to docs/models/deletemodelrequest.md index d9bc15fe..d80103f1 100644 --- a/docs/models/deletemodelv1modelsmodeliddeleterequest.md +++ b/docs/models/deletemodelrequest.md @@ -1,4 +1,4 @@ -# DeleteModelV1ModelsModelIDDeleteRequest +# DeleteModelRequest ## Fields diff --git a/docs/models/filesapiroutesdownloadfilerequest.md b/docs/models/downloadfilerequest.md similarity index 88% rename from docs/models/filesapiroutesdownloadfilerequest.md rename to docs/models/downloadfilerequest.md index 8b28cb0e..3f4dc6cc 100644 --- a/docs/models/filesapiroutesdownloadfilerequest.md +++ b/docs/models/downloadfilerequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesDownloadFileRequest +# DownloadFileRequest ## Fields diff --git a/docs/models/agentsapiv1agentsgetagentversion.md b/docs/models/getagentagentversion.md similarity index 79% rename from docs/models/agentsapiv1agentsgetagentversion.md rename to docs/models/getagentagentversion.md index 7fb9f2d5..6d7b3f1d 100644 --- a/docs/models/agentsapiv1agentsgetagentversion.md +++ b/docs/models/getagentagentversion.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsGetAgentVersion +# GetAgentAgentVersion ## Supported Types diff --git a/docs/models/getagentrequest.md b/docs/models/getagentrequest.md new file mode 100644 index 00000000..3f729dff --- /dev/null +++ b/docs/models/getagentrequest.md @@ -0,0 +1,9 @@ +# GetAgentRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | 
[OptionalNullable[models.GetAgentAgentVersion]](../models/getagentagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetversionrequest.md b/docs/models/getagentversionrequest.md similarity index 90% rename from docs/models/agentsapiv1agentsgetversionrequest.md rename to docs/models/getagentversionrequest.md index 96a73589..c98fee9d 100644 --- a/docs/models/agentsapiv1agentsgetversionrequest.md +++ b/docs/models/getagentversionrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsGetVersionRequest +# GetAgentVersionRequest ## Fields diff --git a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md b/docs/models/getbatchjobrequest.md similarity index 92% rename from docs/models/jobsapiroutesbatchgetbatchjobrequest.md rename to docs/models/getbatchjobrequest.md index 8c259bea..f3c67eb4 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md +++ b/docs/models/getbatchjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesBatchGetBatchJobRequest +# GetBatchJobRequest ## Fields diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/getconversationhistoryrequest.md similarity index 94% rename from docs/models/agentsapiv1conversationshistoryrequest.md rename to docs/models/getconversationhistoryrequest.md index 7e5d39e9..fc90282b 100644 --- a/docs/models/agentsapiv1conversationshistoryrequest.md +++ b/docs/models/getconversationhistoryrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsHistoryRequest +# GetConversationHistoryRequest ## Fields diff --git a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/getconversationmessagesrequest.md similarity index 94% rename from docs/models/agentsapiv1conversationsmessagesrequest.md rename to docs/models/getconversationmessagesrequest.md index a91ab046..fd037fea 100644 --- a/docs/models/agentsapiv1conversationsmessagesrequest.md +++ b/docs/models/getconversationmessagesrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsMessagesRequest +# GetConversationMessagesRequest ## Fields diff --git a/docs/models/agentsapiv1conversationsdeleterequest.md b/docs/models/getconversationrequest.md similarity index 95% rename from docs/models/agentsapiv1conversationsdeleterequest.md rename to docs/models/getconversationrequest.md index c6eed281..8a66a8b0 100644 --- a/docs/models/agentsapiv1conversationsdeleterequest.md +++ b/docs/models/getconversationrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsDeleteRequest +# GetConversationRequest ## Fields diff --git a/docs/models/librariesdocumentsgetsignedurlv1request.md b/docs/models/getdocumentextractedtextsignedurlrequest.md similarity index 89% rename from docs/models/librariesdocumentsgetsignedurlv1request.md rename to docs/models/getdocumentextractedtextsignedurlrequest.md index 7c08c180..ff703802 100644 --- a/docs/models/librariesdocumentsgetsignedurlv1request.md +++ b/docs/models/getdocumentextractedtextsignedurlrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsGetSignedURLV1Request +# GetDocumentExtractedTextSignedURLRequest ## Fields diff --git a/docs/models/librariesdocumentsdeletev1request.md b/docs/models/getdocumentrequest.md similarity index 90% rename from docs/models/librariesdocumentsdeletev1request.md rename to docs/models/getdocumentrequest.md index efccdb1b..29f62127 100644 --- a/docs/models/librariesdocumentsdeletev1request.md +++ b/docs/models/getdocumentrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsDeleteV1Request +# GetDocumentRequest ## Fields diff --git 
a/docs/models/librariesdocumentsreprocessv1request.md b/docs/models/getdocumentsignedurlrequest.md similarity index 90% rename from docs/models/librariesdocumentsreprocessv1request.md rename to docs/models/getdocumentsignedurlrequest.md index 196ba17b..72a179c0 100644 --- a/docs/models/librariesdocumentsreprocessv1request.md +++ b/docs/models/getdocumentsignedurlrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsReprocessV1Request +# GetDocumentSignedURLRequest ## Fields diff --git a/docs/models/librariesdocumentsgetstatusv1request.md b/docs/models/getdocumentstatusrequest.md similarity index 90% rename from docs/models/librariesdocumentsgetstatusv1request.md rename to docs/models/getdocumentstatusrequest.md index e6d41875..3557d773 100644 --- a/docs/models/librariesdocumentsgetstatusv1request.md +++ b/docs/models/getdocumentstatusrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsGetStatusV1Request +# GetDocumentStatusRequest ## Fields diff --git a/docs/models/getdocumenttextcontentrequest.md b/docs/models/getdocumenttextcontentrequest.md new file mode 100644 index 00000000..85933401 --- /dev/null +++ b/docs/models/getdocumenttextcontentrequest.md @@ -0,0 +1,9 @@ +# GetDocumentTextContentRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesgetsignedurlrequest.md b/docs/models/getfilesignedurlrequest.md similarity index 96% rename from docs/models/filesapiroutesgetsignedurlrequest.md rename to docs/models/getfilesignedurlrequest.md index dbe3c801..0be3b288 100644 --- a/docs/models/filesapiroutesgetsignedurlrequest.md +++ b/docs/models/getfilesignedurlrequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesGetSignedURLRequest +# GetFileSignedURLRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md b/docs/models/getfinetuningjobrequest.md similarity index 89% rename from docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md rename to docs/models/getfinetuningjobrequest.md index fde19800..f20cb214 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md +++ b/docs/models/getfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningGetFineTuningJobRequest +# GetFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md b/docs/models/getfinetuningjobresponse.md similarity index 82% rename from docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md rename to docs/models/getfinetuningjobresponse.md index 64f4cca6..1b0568dd 100644 --- a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md +++ b/docs/models/getfinetuningjobresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningStartFineTuningJobResponse +# GetFineTuningJobResponse OK diff --git a/docs/models/librariesdeletev1request.md b/docs/models/getlibraryrequest.md similarity index 90% rename from docs/models/librariesdeletev1request.md rename to docs/models/getlibraryrequest.md index 68d7e543..2a3acf50 100644 --- a/docs/models/librariesdeletev1request.md +++ b/docs/models/getlibraryrequest.md @@ -1,4 +1,4 @@ -# LibrariesDeleteV1Request +# GetLibraryRequest ## Fields diff --git a/docs/models/jobin.md b/docs/models/jobin.md index 33e6ccc6..62da9072 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -5,7 +5,7 @@ | Field | Type | Required 
| Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md deleted file mode 100644 index 23c52c34..00000000 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +++ /dev/null @@ -1,17 +0,0 @@ -# JobsAPIRoutesFineTuningGetFineTuningJobsRequest - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. 
| -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md index 977013f7..69f8342a 100644 --- a/docs/models/jobsout.md +++ b/docs/models/jobsout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.JobsOutObject]](../models/jobsoutobject.md) | :heavy_minus_sign: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsoutobject.md b/docs/models/jobsoutobject.md deleted file mode 100644 index f6c8a2c3..00000000 --- a/docs/models/jobsoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# JobsOutObject - - -## Values - -| Name | Value | -| ------ | ------ | -| `LIST` | list | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadataout.md index 53a45485..8a712140 100644 --- a/docs/models/legacyjobmetadataout.md +++ b/docs/models/legacyjobmetadataout.md @@ -16,4 +16,4 @@ | `details` | *str* | :heavy_check_mark: | N/A | | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. 
| 4.2922 | | `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | 10 | -| `object` | [Optional[models.LegacyJobMetadataOutObject]](../models/legacyjobmetadataoutobject.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `object` | *Optional[Literal["job.metadata"]]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataoutobject.md b/docs/models/legacyjobmetadataoutobject.md deleted file mode 100644 index 9873ada8..00000000 --- a/docs/models/legacyjobmetadataoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# LegacyJobMetadataOutObject - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `JOB_METADATA` | job.metadata | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md deleted file mode 100644 index 14ca66f7..00000000 --- a/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md +++ /dev/null @@ -1,9 +0,0 @@ -# LibrariesDocumentsGetExtractedTextSignedURLV1Request - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgettextcontentv1request.md b/docs/models/librariesdocumentsgettextcontentv1request.md deleted file mode 100644 index 2f58a446..00000000 --- a/docs/models/librariesdocumentsgettextcontentv1request.md +++ /dev/null @@ -1,9 +0,0 @@ -# LibrariesDocumentsGetTextContentV1Request - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionaliasesrequest.md b/docs/models/listagentaliasesrequest.md similarity index 85% rename from docs/models/agentsapiv1agentslistversionaliasesrequest.md rename to docs/models/listagentaliasesrequest.md index 3083bf92..b3570cb8 100644 --- a/docs/models/agentsapiv1agentslistversionaliasesrequest.md +++ b/docs/models/listagentaliasesrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsListVersionAliasesRequest +# ListAgentAliasesRequest ## Fields diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/listagentsrequest.md similarity index 84% rename from docs/models/agentsapiv1agentslistrequest.md rename to docs/models/listagentsrequest.md index 8cba1325..79aec3ea 100644 --- a/docs/models/agentsapiv1agentslistrequest.md +++ b/docs/models/listagentsrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsListRequest +# ListAgentsRequest ## Fields @@ -9,6 +9,7 @@ | `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `sources` | List[[models.RequestSource](../models/requestsource.md)] | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Filter by agent name | +| `search` | 
*OptionalNullable[str]* | :heavy_minus_sign: | Search agents by name or ID | | `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionsrequest.md b/docs/models/listagentversionsrequest.md similarity index 94% rename from docs/models/agentsapiv1agentslistversionsrequest.md rename to docs/models/listagentversionsrequest.md index 91831700..ba8ddaa5 100644 --- a/docs/models/agentsapiv1agentslistversionsrequest.md +++ b/docs/models/listagentversionsrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsListVersionsRequest +# ListAgentVersionsRequest ## Fields diff --git a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/listbatchjobsrequest.md similarity index 90% rename from docs/models/jobsapiroutesbatchgetbatchjobsrequest.md rename to docs/models/listbatchjobsrequest.md index b062b873..19981b24 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md +++ b/docs/models/listbatchjobsrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesBatchGetBatchJobsRequest +# ListBatchJobsRequest ## Fields @@ -12,4 +12,5 @@ | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `status` | List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `status` | List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `order_by` | [Optional[models.OrderBy]](../models/orderby.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistrequest.md b/docs/models/listconversationsrequest.md similarity index 92% rename from docs/models/agentsapiv1conversationslistrequest.md rename to docs/models/listconversationsrequest.md index 62c9011f..d99b4208 100644 --- a/docs/models/agentsapiv1conversationslistrequest.md +++ b/docs/models/listconversationsrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsListRequest +# ListConversationsRequest ## Fields diff --git a/docs/models/agentsapiv1conversationslistresponse.md b/docs/models/listconversationsresponse.md similarity index 84% rename from docs/models/agentsapiv1conversationslistresponse.md rename to docs/models/listconversationsresponse.md index b233ee20..9d611c55 100644 --- a/docs/models/agentsapiv1conversationslistresponse.md +++ b/docs/models/listconversationsresponse.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsListResponse +# ListConversationsResponse ## Supported Types diff --git a/docs/models/librariesdocumentslistv1request.md b/docs/models/listdocumentsrequest.md similarity index 96% rename from docs/models/librariesdocumentslistv1request.md rename to docs/models/listdocumentsrequest.md index 44f63001..369e8edb 100644 --- a/docs/models/librariesdocumentslistv1request.md +++ b/docs/models/listdocumentsrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsListV1Request +# ListDocumentsRequest ## Fields diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/listfilesrequest.md similarity index 98% rename from docs/models/filesapirouteslistfilesrequest.md rename to docs/models/listfilesrequest.md index 57d11722..2d76a76b 100644 --- a/docs/models/filesapirouteslistfilesrequest.md +++ b/docs/models/listfilesrequest.md @@ -1,4 
+1,4 @@ -# FilesAPIRoutesListFilesRequest +# ListFilesRequest ## Fields diff --git a/docs/models/listfinetuningjobsrequest.md b/docs/models/listfinetuningjobsrequest.md new file mode 100644 index 00000000..3a04fc70 --- /dev/null +++ b/docs/models/listfinetuningjobsrequest.md @@ -0,0 +1,17 @@ +# ListFineTuningJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.ListFineTuningJobsStatus]](../models/listfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md b/docs/models/listfinetuningjobsstatus.md similarity index 94% rename from docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md rename to docs/models/listfinetuningjobsstatus.md index 40d57686..07db9ae5 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md +++ b/docs/models/listfinetuningjobsstatus.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningGetFineTuningJobsStatus +# ListFineTuningJobsStatus The current job state to filter on. When set, the other results are not displayed.
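The `ListFineTuningJobsRequest` fields above map one-to-one onto keyword arguments of the list call. A minimal sketch of combining the filters, assuming the operation is exposed as `mistral.fine_tuning.jobs.list` per the generated `finetuningjobs` README; the `created_after` cutoff and `suffix` value are placeholders for illustration only:

```python
from datetime import date
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Each filter, when set, simply hides non-matching jobs.
    res = mistral.fine_tuning.jobs.list(
        page=0,
        page_size=100,
        created_after=date(2025, 1, 1),  # placeholder cutoff date
        created_by_me=True,
        suffix="my-great-model",  # placeholder suffix
    )
    print(res)
```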
diff --git a/docs/models/librariessharelistv1request.md b/docs/models/listlibraryaccessesrequest.md similarity index 90% rename from docs/models/librariessharelistv1request.md rename to docs/models/listlibraryaccessesrequest.md index 98bf6d17..d98bcda2 100644 --- a/docs/models/librariessharelistv1request.md +++ b/docs/models/listlibraryaccessesrequest.md @@ -1,4 +1,4 @@ -# LibrariesShareListV1Request +# ListLibraryAccessesRequest ## Fields diff --git a/docs/models/orderby.md b/docs/models/orderby.md new file mode 100644 index 00000000..bba50df1 --- /dev/null +++ b/docs/models/orderby.md @@ -0,0 +1,9 @@ +# OrderBy + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `CREATED` | created | +| `MINUS_CREATED` | -created | \ No newline at end of file diff --git a/docs/models/reprocessdocumentrequest.md b/docs/models/reprocessdocumentrequest.md new file mode 100644 index 00000000..cf3982a8 --- /dev/null +++ b/docs/models/reprocessdocumentrequest.md @@ -0,0 +1,9 @@ +# ReprocessDocumentRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/restartconversationrequest.md similarity index 96% rename from docs/models/agentsapiv1conversationsrestartrequest.md rename to docs/models/restartconversationrequest.md index a18a41f5..f24f14e6 100644 --- a/docs/models/agentsapiv1conversationsrestartrequest.md +++ b/docs/models/restartconversationrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsRestartRequest +# RestartConversationRequest ## Fields diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md b/docs/models/restartconversationstreamrequest.md similarity index 96% rename from docs/models/agentsapiv1conversationsrestartstreamrequest.md rename to docs/models/restartconversationstreamrequest.md index 7548286a..daa661a9 100644 --- a/docs/models/agentsapiv1conversationsrestartstreamrequest.md +++ b/docs/models/restartconversationstreamrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsRestartStreamRequest +# RestartConversationStreamRequest ## Fields diff --git a/docs/models/filesapiroutesretrievefilerequest.md b/docs/models/retrievefilerequest.md similarity index 88% rename from docs/models/filesapiroutesretrievefilerequest.md rename to docs/models/retrievefilerequest.md index 961bae1f..454b9665 100644 --- a/docs/models/filesapiroutesretrievefilerequest.md +++ b/docs/models/retrievefilerequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesRetrieveFileRequest +# RetrieveFileRequest ## Fields diff --git a/docs/models/retrievemodelv1modelsmodelidgetrequest.md b/docs/models/retrievemodelrequest.md similarity index 94% rename from docs/models/retrievemodelv1modelsmodelidgetrequest.md rename to docs/models/retrievemodelrequest.md index f1280f88..787c3dd1 100644 --- a/docs/models/retrievemodelv1modelsmodelidgetrequest.md +++ b/docs/models/retrievemodelrequest.md @@ -1,4 +1,4 @@ -# RetrieveModelV1ModelsModelIDGetRequest +# RetrieveModelRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md b/docs/models/startfinetuningjobrequest.md similarity index 84% rename from docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md rename to docs/models/startfinetuningjobrequest.md index 4429fe48..9df5aee8 100644 --- 
a/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md +++ b/docs/models/startfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningStartFineTuningJobRequest +# StartFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md b/docs/models/startfinetuningjobresponse.md similarity index 82% rename from docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md rename to docs/models/startfinetuningjobresponse.md index 1b331662..dce84c5a 100644 --- a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md +++ b/docs/models/startfinetuningjobresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningCancelFineTuningJobResponse +# StartFineTuningJobResponse OK diff --git a/docs/models/unarchiveftmodelout.md b/docs/models/unarchiveftmodelout.md index 287c9a00..12c3d745 100644 --- a/docs/models/unarchiveftmodelout.md +++ b/docs/models/unarchiveftmodelout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.UnarchiveFTModelOutObject]](../models/unarchiveftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/unarchiveftmodeloutobject.md b/docs/models/unarchiveftmodeloutobject.md deleted file mode 100644 index 623dcec2..00000000 --- a/docs/models/unarchiveftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# UnarchiveFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md b/docs/models/unarchivemodelrequest.md similarity index 92% rename from docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md rename to docs/models/unarchivemodelrequest.md index 95c1734d..033dad8a 100644 --- a/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md +++ b/docs/models/unarchivemodelrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest +# UnarchiveModelRequest ## Fields diff --git a/docs/models/agentsapiv1agentsupdaterequest.md b/docs/models/updateagentrequest.md similarity index 96% rename from docs/models/agentsapiv1agentsupdaterequest.md rename to docs/models/updateagentrequest.md index f60f8e5b..358cb71d 100644 --- a/docs/models/agentsapiv1agentsupdaterequest.md +++ b/docs/models/updateagentrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsUpdateRequest +# UpdateAgentRequest ## Fields diff --git a/docs/models/agentsapiv1agentsupdateversionrequest.md b/docs/models/updateagentversionrequest.md similarity index 89% rename from docs/models/agentsapiv1agentsupdateversionrequest.md rename to 
docs/models/updateagentversionrequest.md index e937acc9..b83eb867 100644 --- a/docs/models/agentsapiv1agentsupdateversionrequest.md +++ b/docs/models/updateagentversionrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsUpdateVersionRequest +# UpdateAgentVersionRequest ## Fields diff --git a/docs/models/librariesdocumentsupdatev1request.md b/docs/models/updatedocumentrequest.md similarity index 97% rename from docs/models/librariesdocumentsupdatev1request.md rename to docs/models/updatedocumentrequest.md index 2f18b014..fa5d117a 100644 --- a/docs/models/librariesdocumentsupdatev1request.md +++ b/docs/models/updatedocumentrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsUpdateV1Request +# UpdateDocumentRequest ## Fields diff --git a/docs/models/librariesupdatev1request.md b/docs/models/updatelibraryrequest.md similarity index 97% rename from docs/models/librariesupdatev1request.md rename to docs/models/updatelibraryrequest.md index a68ef7a8..e03883cc 100644 --- a/docs/models/librariesupdatev1request.md +++ b/docs/models/updatelibraryrequest.md @@ -1,4 +1,4 @@ -# LibrariesUpdateV1Request +# UpdateLibraryRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md b/docs/models/updatemodelrequest.md similarity index 95% rename from docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md rename to docs/models/updatemodelrequest.md index 6d93832e..5799c63b 100644 --- a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md +++ b/docs/models/updatemodelrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningUpdateFineTunedModelRequest +# UpdateModelRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md b/docs/models/updatemodelresponse.md similarity index 81% rename from docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md rename to docs/models/updatemodelresponse.md index 54f4c398..275ee77f 100644 --- a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md +++ b/docs/models/updatemodelresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningUpdateFineTunedModelResponse +# UpdateModelResponse OK diff --git a/docs/models/librariessharecreatev1request.md b/docs/models/updateorcreatelibraryaccessrequest.md similarity index 95% rename from docs/models/librariessharecreatev1request.md rename to docs/models/updateorcreatelibraryaccessrequest.md index 4c05241d..e04567b4 100644 --- a/docs/models/librariessharecreatev1request.md +++ b/docs/models/updateorcreatelibraryaccessrequest.md @@ -1,4 +1,4 @@ -# LibrariesShareCreateV1Request +# UpdateOrCreateLibraryAccessRequest ## Fields diff --git a/docs/models/librariesdocumentsuploadv1request.md b/docs/models/uploaddocumentrequest.md similarity index 96% rename from docs/models/librariesdocumentsuploadv1request.md rename to docs/models/uploaddocumentrequest.md index 172a6183..92152b7f 100644 --- a/docs/models/librariesdocumentsuploadv1request.md +++ b/docs/models/uploaddocumentrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsUploadV1Request +# UploadDocumentRequest ## Fields diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index 64a1e749..c1e3866d 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -16,7 +16,7 @@ Given a library, list all of the Entity that have access and to what level. 
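The request-model renames above are presentation-only (the similarity indexes sit between 84% and 98%, and the field tables are untouched), so only explicit imports and type annotations need updating. A hedged before/after sketch, assuming the new classes are re-exported from `mistralai.models` the same way the old ones were:

```python
from mistralai import models

# Before: route-derived name
#     def describe(req: models.LibrariesDocumentsUploadV1Request) -> str: ...
# After: the same fields under the shorter name
def describe(req: models.UploadDocumentRequest) -> str:
    # Callers that only pass keyword arguments to SDK methods are unaffected;
    # only lines that name the request class, like this annotation, change.
    return f"upload request for library {req.library_id}"
```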
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -26,7 +26,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.list(library_id="d2169833-d8e2-416e-a372-76518d3d99c2") + res = mistral.beta.libraries.accesses.list(library_id="9eb628ef-f118-47eb-b3cc-9750c4ca5fb6") # Handle response print(res) @@ -57,7 +57,7 @@ Given a library id, you can create or update the access level of an entity. You ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -67,7 +67,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") + res = mistral.beta.libraries.accesses.update_or_create(library_id="88bb030c-1cb5-4231-ba13-742c56554876", level="Viewer", share_with_uuid="6a736283-c1fa-49b0-9b6d-ea9309c0a766", share_with_type="Workspace") # Handle response print(res) @@ -102,7 +102,7 @@ Given a library id, you can delete the access level of an entity. An owner canno ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -112,7 +112,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") + res = mistral.beta.libraries.accesses.delete(library_id="fc7ab1cf-e33c-4791-a6e0-95ff1f921c43", share_with_uuid="5818ddff-3568-40f1-a9e4-39d6cb9f5c94", share_with_type="Org") # Handle response print(res) diff --git a/docs/sdks/batchjobs/README.md b/docs/sdks/batchjobs/README.md index 8f2358de..24316d78 100644 --- a/docs/sdks/batchjobs/README.md +++ b/docs/sdks/batchjobs/README.md @@ -15,7 +15,7 @@ Get a list of batch jobs for your organization and user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) + res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False, order_by="-created") # Handle response print(res) @@ -44,6 +44,7 @@ with Mistral( | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `status` | List[[models.BatchJobStatus](../../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `order_by` | [Optional[models.OrderBy]](../../models/orderby.md) | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -62,7 +63,7 @@ Create a new batch job, it will be queued for processing. 
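One note on the `order_by` parameter just added to `jobs.list` above: it takes the two `OrderBy` values (`created`, `-created`). A minimal sketch, assuming both are accepted as plain strings as in the updated example:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # "-created" (the MINUS_CREATED enum value) sorts newest first;
    # plain "created" returns the oldest jobs first.
    newest_first = mistral.batch.jobs.list(page=0, page_size=100, order_by="-created")
    oldest_first = mistral.batch.jobs.list(page=0, page_size=100, order_by="created")
    print(newest_first, oldest_first)
```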
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -72,7 +73,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.create(endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) + res = mistral.batch.jobs.create(endpoint="/v1/classifications", model="mistral-small-latest", timeout_hours=24) # Handle response print(res) @@ -111,7 +112,7 @@ Args: ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -121,7 +122,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.get(job_id="4017dc9f-b629-42f4-9700-8c681b9e7f0f") + res = mistral.batch.jobs.get(job_id="358c80a1-79bd-43f0-8f0e-8186713aa3ba") # Handle response print(res) @@ -152,7 +153,7 @@ Request the cancellation of a batch job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -162,7 +163,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.cancel(job_id="4fb29d1c-535b-4f0a-a1cb-2167f86da569") + res = mistral.batch.jobs.cancel(job_id="393537d7-8b33-4931-a289-7f61f8757eda") # Handle response print(res) diff --git a/docs/sdks/betaagents/README.md b/docs/sdks/betaagents/README.md index 8d23b875..0ef655a3 100644 --- a/docs/sdks/betaagents/README.md +++ b/docs/sdks/betaagents/README.md @@ -16,6 +16,7 @@ * [get_version](#get_version) - Retrieve a specific version of an agent. * [create_version_alias](#create_version_alias) - Create or update an agent version alias. * [list_version_aliases](#list_version_aliases) - List all aliases for an agent. +* [delete_version_alias](#delete_version_alias) - Delete an agent version alias. ## create @@ -23,7 +24,7 @@ Create a new agent giving it instructions, tools, description. The agent is then ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -33,7 +34,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create(model="LeBaron", name="", completion_args={ + res = mistral.beta.agents.create(model="Mustang", name="", completion_args={ "response_format": { "type": "text", }, @@ -56,6 +57,7 @@ with Mistral( | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -75,7 +77,7 @@ Retrieve a list of agent entities sorted by creation time. 
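The new `version_message` field on `create` above (and on `update` below) carries no description in the generated table; it is assumed here to be a human-readable note stored with the agent version the call produces. A hedged sketch with placeholder values:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # version_message is assumed to annotate the version created by this call.
    res = mistral.beta.agents.create(
        model="mistral-medium-latest",  # placeholder model name
        name="support-triage",          # placeholder agent name
        version_message="initial version",
    )
    print(res)
```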
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -100,7 +102,8 @@ with Mistral( | `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `sources` | List[[models.RequestSource](../../models/requestsource.md)] | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Filter by agent name | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search agents by name or ID | | `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -122,7 +125,7 @@ Given an agent, retrieve an agent entity with its attributes. The agent_version ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -141,11 +144,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.GetAgentAgentVersion]](../../models/getagentagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -164,7 +167,7 @@ Update an agent attributes and create a new version. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -199,6 +202,7 @@ with Mistral( | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -218,7 +222,7 @@ Delete an agent entity. 
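`get` above now takes the `GetAgentAgentVersion` union rather than a bare value; the union's member types live in getagentagentversion.md rather than here, so the sketch below assumes an integer version number is one accepted member (the agent id is a placeholder):

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Omitting agent_version returns the latest version; setting it pins the lookup.
    res = mistral.beta.agents.get(
        agent_id="agent-id-placeholder",
        agent_version=2,  # assumed union member; see getagentagentversion.md
    )
    print(res)
```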
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -254,7 +258,7 @@ Switch the version of an agent. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -264,7 +268,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.update_version(agent_id="", version=157995) + res = mistral.beta.agents.update_version(agent_id="", version=958693) # Handle response print(res) @@ -296,7 +300,7 @@ Retrieve all versions for a specific agent with full agent context. Supports pag ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -339,7 +343,7 @@ Get a specific agent version by version number. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -349,7 +353,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.get_version(agent_id="", version="788393") + res = mistral.beta.agents.get_version(agent_id="", version="") # Handle response print(res) @@ -381,7 +385,7 @@ Create a new alias or update an existing alias to point to a specific version. A ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -391,7 +395,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=595141) + res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=154719) # Handle response print(res) @@ -424,7 +428,7 @@ Retrieve all version aliases for a specific agent. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -454,6 +458,43 @@ with Mistral( ### Errors +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## delete_version_alias + +Delete an existing alias for an agent. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.agents.delete_version_alias(agent_id="", alias="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 6aae03c5..c0089f12 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -24,7 +24,7 @@ Create a new conversation, using a base model or an agent and append entries. 
Co ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -81,7 +81,7 @@ Retrieve a list of conversation entities sorted by creation time. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -109,7 +109,7 @@ with Mistral( ### Response -**[List[models.AgentsAPIV1ConversationsListResponse]](../../models/.md)** +**[List[models.ListConversationsResponse]](../../models/.md)** ### Errors @@ -124,7 +124,7 @@ Given a conversation_id retrieve a conversation entity with its attributes. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -165,7 +165,7 @@ Delete a conversation given a conversation_id. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -201,7 +201,7 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -251,7 +251,7 @@ Given a conversation_id retrieve all the entries belonging to that conversation. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -292,7 +292,7 @@ Given a conversation_id retrieve all the messages belonging to that conversation ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -333,7 +333,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -386,7 +386,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -396,14 +396,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs=[ - { - "object": "entry", - "type": "function.result", - "tool_call_id": "", - "result": "", - }, - ], stream=True, completion_args={ + res = mistral.beta.conversations.start_stream(inputs="", stream=True, completion_args={ "response_format": { "type": "text", }, @@ -452,7 +445,7 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -504,7 +497,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -514,15 +507,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart_stream(conversation_id="", inputs=[ - { - "object": "entry", - "type": "message.input", - "role": "assistant", - "content": "", - "prefix": False, - }, - ], from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.restart_stream(conversation_id="", inputs="", from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index d90e7ee7..97831f86 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -23,7 +23,7 @@ Given a library, lists the document that have been uploaded to that library. 
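The reworked conversation streaming snippets above now pass `inputs` as a plain string rather than a list of entry objects. A minimal end-to-end sketch, assuming the response follows the event-stream context-manager pattern used elsewhere in these docs and that `agent_id` (a placeholder here) selects the agent:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    res = mistral.beta.conversations.start_stream(
        inputs="Hello!",                  # plain-string input, as in the updated snippets
        stream=True,
        agent_id="agent-id-placeholder",  # placeholder; a model can be used instead
    )
    # Assumed pattern: iterate server-sent events as they arrive.
    with res as event_stream:
        for event in event_stream:
            print(event, flush=True)
```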
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -33,7 +33,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.list(library_id="5c3ca4cd-62bc-4c71-ad8a-1531ae80d078", page_size=100, page=0, sort_by="created_at", sort_order="desc") + res = mistral.beta.libraries.documents.list(library_id="05e1bda5-99b1-4baf-bb03-905d8e094f74", page_size=100, page=0, sort_by="created_at", sort_order="desc") # Handle response print(res) @@ -70,7 +70,7 @@ Given a library, upload a new document to that library. It is queued for process ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -80,7 +80,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ + res = mistral.beta.libraries.documents.upload(library_id="f973c54e-979a-4464-9d36-8cc31beb21fe", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) @@ -115,7 +115,7 @@ Given a library and a document in this library, you can retrieve the metadata of ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -125,7 +125,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.get(library_id="03d908c8-90a1-44fd-bf3a-8490fb7c9a03", document_id="90973aec-0508-4375-8b00-91d732414745") + res = mistral.beta.libraries.documents.get(library_id="f9902d0a-1ea4-4953-be48-52df6edd302a", document_id="c3e12fd9-e840-46f2-8d4a-79985ed36d24") # Handle response print(res) @@ -157,7 +157,7 @@ Given a library and a document in that library, update the name of that document ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.update(library_id="3ddd8d93-dca5-4a6d-980d-173226c35742", document_id="2a25e44c-b160-40ca-b5c2-b65fb2fcae34") + res = mistral.beta.libraries.documents.update(library_id="3b900c67-d2b6-4637-93f2-3eff2c85f8dd", document_id="66f935fd-37ec-441f-bca5-b1129befcbca") # Handle response print(res) @@ -201,7 +201,7 @@ Given a library and a document in that library, delete that document. The docume ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -211,7 +211,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - mistral.beta.libraries.documents.delete(library_id="005daae9-d42e-407d-82d7-2261c6a1496c", document_id="edc236b0-baff-49a9-884b-4ca36a258da4") + mistral.beta.libraries.documents.delete(library_id="c728d742-7845-462b-84ad-2aacbaf1c7cf", document_id="ed3f5797-846a-4abe-8e30-39b2fd2323e0") # Use the SDK ... 
@@ -238,7 +238,7 @@ Given a library and a document in that library, you can retrieve the text conten ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -248,7 +248,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.text_content(library_id="1d177215-3b6b-45ba-9fa9-baf773223bec", document_id="60214c91-2aba-4692-a4e6-a53365de8caf") + res = mistral.beta.libraries.documents.text_content(library_id="12689dc1-50df-4a0d-8202-2757f7a8c141", document_id="9d4057e9-d112-437c-911e-6ee031389739") # Handle response print(res) @@ -280,7 +280,7 @@ Given a library and a document in that library, retrieve the processing status o ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -290,7 +290,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.status(library_id="e6906f70-368f-4155-80da-c1718f01bc43", document_id="2c904915-d831-4e9d-a345-8ce405bcef66") + res = mistral.beta.libraries.documents.status(library_id="41bb33c4-7e53-453d-bf21-398bb2862772", document_id="416b95cf-19c8-45af-84be-26aaa3ab3666") # Handle response print(res) @@ -322,7 +322,7 @@ Given a library and a document in that library, retrieve the signed URL of a spe ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -332,7 +332,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.get_signed_url(library_id="23cf6904-a602-4ee8-9f5b-8efc557c336d", document_id="48598486-df71-4994-acbb-1133c72efa8c") + res = mistral.beta.libraries.documents.get_signed_url(library_id="2dbbe172-1374-41be-b03d-a088c733612e", document_id="b5d88764-47f1-4485-9df1-658775428344") # Handle response print(res) @@ -364,7 +364,7 @@ Given a library and a document in that library, retrieve the signed URL of text ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -374,7 +374,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="a6f15de3-1e82-4f95-af82-851499042ef8", document_id="9749d4f9-24e5-4ca2-99a3-a406863f805d") + res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="46d040ce-ae2e-4891-a54c-cdab6a8f62d8", document_id="3eddbfe2-3fd7-47f5-984b-b378e6950e37") # Handle response print(res) @@ -406,7 +406,7 @@ Given a library and a document in that library, reprocess that document, it will ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -416,7 +416,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - mistral.beta.libraries.documents.reprocess(library_id="51b29371-de8f-4ba4-932b-a0bafb3a7f64", document_id="3052422c-49ca-45ac-a918-cadb35d61fd8") + mistral.beta.libraries.documents.reprocess(library_id="76d357e4-d891-40c6-9d1e-6d6ce5056ee0", document_id="09798d2b-8f46-46c6-9765-8054a82a4bb2") # Use the SDK ... diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 44c39f8a..ae29b7bf 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -23,7 +23,7 @@ Please contact us if you need to increase these storage limits. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -67,7 +67,7 @@ Returns a list of files that belong to the user's organization. 
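For the file-listing endpoint introduced above, a minimal sketch; that the paginated response carries its entries under `data` is an assumption:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # List files belonging to the user's organization.
    res = mistral.files.list()

    # Assumption: file entries live under `data` on the response.
    for f in res.data:
        print(f.id, f.filename)
```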
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -114,7 +114,7 @@ Returns information about a specific file. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -124,7 +124,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.retrieve(file_id="f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6") + res = mistral.files.retrieve(file_id="654a62d9-b7ee-49ac-835e-af4153e3c9ec") # Handle response print(res) @@ -154,7 +154,7 @@ Delete a file. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -164,7 +164,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.delete(file_id="3b6d45eb-e30b-416f-8019-f47e2e93d930") + res = mistral.files.delete(file_id="789c27a4-69de-47c6-b67f-cf6e56ce9f41") # Handle response print(res) @@ -194,7 +194,7 @@ Download a file ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -204,7 +204,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.download(file_id="f8919994-a4a1-46b2-8b5b-06335a4300ce") + res = mistral.files.download(file_id="e2ba278e-eac9-4050-ae8e-ec433e124efb") # Handle response print(res) @@ -234,7 +234,7 @@ Get Signed Url ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -244,7 +244,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="06a020ab-355c-49a6-b19d-304b7c01699f", expiry=24) + res = mistral.files.get_signed_url(file_id="7a0c108d-9e6b-4c47-990d-a20cba50b283", expiry=24) # Handle response print(res) diff --git a/docs/sdks/finetuningjobs/README.md b/docs/sdks/finetuningjobs/README.md index 63897fd6..fe18feeb 100644 --- a/docs/sdks/finetuningjobs/README.md +++ b/docs/sdks/finetuningjobs/README.md @@ -16,7 +16,7 @@ Get a list of fine-tuning jobs for your organization and user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -35,19 +35,19 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. 
| -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.ListFineTuningJobsStatus]](../../models/listfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -65,7 +65,7 @@ Create a new fine-tuning job, it will be queued for processing. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -75,7 +75,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Camaro", hyperparameters={ + res = mistral.fine_tuning.jobs.create(model="Countach", hyperparameters={ "learning_rate": 0.0001, }, invalid_sample_skip_percentage=0) @@ -88,7 +88,7 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.Hyperparameters](../../models/hyperparameters.md) | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | @@ -103,7 +103,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** +**[models.CreateFineTuningJobResponse](../../models/createfinetuningjobresponse.md)** ### Errors @@ -117,7 +117,7 @@ Get a fine-tuned job details by its UUID. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -127,7 +127,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.get(job_id="c167a961-ffca-4bcf-93ac-6169468dd389") + res = mistral.fine_tuning.jobs.get(job_id="2855f873-414e-4cf5-a46e-e589e39ee809") # Handle response print(res) @@ -143,7 +143,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** +**[models.GetFineTuningJobResponse](../../models/getfinetuningjobresponse.md)** ### Errors @@ -157,7 +157,7 @@ Request the cancellation of a fine tuning job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.cancel(job_id="6188a2f6-7513-4e0f-89cc-3f8088523a49") + res = mistral.fine_tuning.jobs.cancel(job_id="ee7d6f03-fcbb-43ca-8f17-0388c0832eb9") # Handle response print(res) @@ -183,7 +183,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** +**[models.CancelFineTuningJobResponse](../../models/cancelfinetuningjobresponse.md)** ### Errors @@ -197,7 +197,7 @@ Request the start of a validated fine tuning job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -207,7 +207,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.start(job_id="56553e4d-0679-471e-b9ac-59a77d671103") + res = mistral.fine_tuning.jobs.start(job_id="da371429-0ec2-4cea-b9c7-73ce3a1dd76f") # Handle response print(res) @@ -223,7 +223,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** +**[models.StartFineTuningJobResponse](../../models/startfinetuningjobresponse.md)** ### Errors diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index bbdacf05..8835d0ec 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -18,7 +18,7 @@ List all libraries that you have created or have been shared with you. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -57,7 +57,7 @@ Create a new Library, you will be marked as the owner and only you will have the ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -100,7 +100,7 @@ Given a library id, details information about that Library. 
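A hedged sketch tying the fine-tuning job endpoints above together: create a job, then fetch it by UUID. The training-file payload shape mirrors the renamed examples later in this patch; the file-ID placeholder and the `id` attribute on the created job are assumptions.

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Create a job; it is queued for processing.
    created_job = mistral.fine_tuning.jobs.create(
        model="mistral-small-latest",
        training_files=[{"file_id": "<uploaded-training-file-id>", "weight": 1}],
        hyperparameters={"learning_rate": 0.0001},
    )

    # Assumption: the create response exposes the job's UUID as `id`.
    res = mistral.fine_tuning.jobs.get(job_id=created_job.id)
    print(res)
```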
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -110,7 +110,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.get(library_id="d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f") + res = mistral.beta.libraries.get(library_id="44e385d6-783e-4b21-8fae-5181e6817bc4") # Handle response print(res) @@ -141,7 +141,7 @@ Given a library id, deletes it together with all documents that have been upload ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -151,7 +151,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.delete(library_id="6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a") + res = mistral.beta.libraries.delete(library_id="441ba08a-3d1f-4700-8d6f-f32eeed49dff") # Handle response print(res) @@ -182,7 +182,7 @@ Given a library id, you can update the name and description. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -192,7 +192,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.update(library_id="e01880c3-d0b5-4a29-8b1b-abdb8ce917e4") + res = mistral.beta.libraries.update(library_id="27049553-3425-49ce-b965-fcb3a7ab03a3") # Handle response print(res) diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 129ea223..0cbf1bdd 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -19,7 +19,7 @@ List all models available to the user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -58,7 +58,7 @@ Retrieve information about a model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -99,7 +99,7 @@ Delete a fine-tuned model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -140,7 +140,7 @@ Update a model name or description. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -168,7 +168,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse](../../models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md)** +**[models.UpdateModelResponse](../../models/updatemodelresponse.md)** ### Errors @@ -182,7 +182,7 @@ Archive a fine-tuned model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -222,7 +222,7 @@ Un-archive a fine-tuned model. 
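The model-management hunks above mostly swap placeholder IDs and response model names; a short hedged sketch of the retrieve / archive / un-archive round trip they document. The fine-tuned model ID is a placeholder, and the method names are inferred from the section headings above:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    model_id = "ft:open-mistral-7b:587a6b29:20240514:7e773925"  # placeholder

    # Retrieve information about the fine-tuned model.
    res = mistral.models.retrieve(model_id=model_id)
    print(res)

    # Archive, then un-archive, the fine-tuned model.
    mistral.models.archive(model_id=model_id)
    mistral.models.unarchive(model_id=model_id)
```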
### Example Usage - + ```python from mistralai.client import Mistral import os diff --git a/examples/mistral/jobs/async_jobs.py b/examples/mistral/jobs/async_fine_tuning.py similarity index 97% rename from examples/mistral/jobs/async_jobs.py rename to examples/mistral/jobs/async_fine_tuning.py index 12f9035e..080dbe03 100644 --- a/examples/mistral/jobs/async_jobs.py +++ b/examples/mistral/jobs/async_fine_tuning.py @@ -24,7 +24,7 @@ async def main(): # Create a new job created_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", + model="mistral-small-latest", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], hyperparameters=CompletionTrainingParametersIn( diff --git a/examples/mistral/jobs/async_jobs_chat.py b/examples/mistral/jobs/async_fine_tuning_chat.py similarity index 99% rename from examples/mistral/jobs/async_jobs_chat.py rename to examples/mistral/jobs/async_fine_tuning_chat.py index f14fb833..f170fed4 100644 --- a/examples/mistral/jobs/async_jobs_chat.py +++ b/examples/mistral/jobs/async_fine_tuning_chat.py @@ -82,7 +82,7 @@ async def main(): ) # Create a new job created_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", + model="mistral-small-latest", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], hyperparameters=CompletionTrainingParametersIn( diff --git a/examples/mistral/jobs/jobs.py b/examples/mistral/jobs/fine_tuning.py similarity index 97% rename from examples/mistral/jobs/jobs.py rename to examples/mistral/jobs/fine_tuning.py index be3a821f..2d155cc2 100644 --- a/examples/mistral/jobs/jobs.py +++ b/examples/mistral/jobs/fine_tuning.py @@ -22,7 +22,7 @@ def main(): # Create a new job created_job = client.fine_tuning.jobs.create( - model="open-mistral-7b", + model="mistral-small-latest", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], hyperparameters=CompletionTrainingParametersIn( diff --git a/examples/mistral/jobs/dry_run_job.py b/examples/mistral/jobs/fine_tuning_dry_run.py similarity index 97% rename from examples/mistral/jobs/dry_run_job.py rename to examples/mistral/jobs/fine_tuning_dry_run.py index d4280836..d0c6f733 100644 --- a/examples/mistral/jobs/dry_run_job.py +++ b/examples/mistral/jobs/fine_tuning_dry_run.py @@ -20,7 +20,7 @@ async def main(): # Create a new job dry_run_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", + model="mistral-small-latest", training_files=[{"file_id": training_file.id, "weight": 1}], hyperparameters=CompletionTrainingParametersIn( training_steps=1, diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 45ed9b17..a7cdba10 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,7 +1,7 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: eb0d097e3bdb7c0784f34ca2af2ce554 + docChecksum: 2bebd9aadeecb18391d46d1dadc340ef docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 @@ -12,9 +12,9 @@ management: installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_azure published: true persistentEdits: - generation_id: 0d580549-db09-4078-890b-62de0e5fe937 - pristine_commit_hash: b561cb140a25a721f54d0aad3c9a03d419c8fc19 - pristine_tree_hash: 
d122bdae045ddf46c910e2f5da53d78da18ef009 + generation_id: ecb4f74f-ba8a-4f28-941d-36b3258200bd + pristine_commit_hash: 785c0560d42a9c4cff938392bb6d52d98a2f3529 + pristine_tree_hash: 50ed42d2e4b3d4ecd639935cd1511220354a41d7 features: python: additionalDependencies: 1.0.0 @@ -197,8 +197,8 @@ trackedFiles: pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/mistralpromptmode.md: id: d17d5db4d3b6 - last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 - pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 docs/models/ocrimageobject.md: id: b72f3c5853b2 last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 @@ -213,8 +213,8 @@ trackedFiles: pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e docs/models/ocrrequest.md: id: 6862a3fc2d0f - last_write_checksum: sha1:f32fcc5916f9eedf7adfaa60beda30a9ec42f32e - pristine_git_object: 76e4da925937fd4bdd42307f116a74d4dbf2bea3 + last_write_checksum: sha1:9311e2c87f8f4512c35a717d3b063f2861f878d4 + pristine_git_object: 87929e53f8a74823b82ecce56d15f22228134fa6 docs/models/ocrresponse.md: id: 30042328fb78 last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 @@ -485,8 +485,8 @@ trackedFiles: pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 src/mistralai_azure/models/mistralpromptmode.py: id: f62a521bcdae - last_write_checksum: sha1:fcb16c10986bd6946f79b9e330a4be9f26f7e724 - pristine_git_object: 22fb643896688b68af238f6ac75cf41a00b0511b + last_write_checksum: sha1:82190bc14d2e51440723176cb8108791485c1180 + pristine_git_object: 77230b7e5e61cc662fdc52c72e8b817a15e183c3 src/mistralai_azure/models/no_response_error.py: id: 54523e14f29b last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f @@ -505,8 +505,8 @@ trackedFiles: pristine_git_object: e95718001e07bb89ba2fc9094f88b894572148bb src/mistralai_azure/models/ocrrequest.py: id: 4e574d5fb9be - last_write_checksum: sha1:6ca937598dd92c6c6ab7b8d59363595a3e8760e9 - pristine_git_object: 565a0a30a7f9fae374c14fb5fcb0f19385cf05e4 + last_write_checksum: sha1:1b03dc8b392069f6b142228e74179c8341b09ffa + pristine_git_object: e9c23afcdd7440660f17c7819406d7e603eabbec src/mistralai_azure/models/ocrresponse.py: id: 326a4d9fab25 last_write_checksum: sha1:cf597498a5841a56bbd1aeb8478bd57a01d93cb1 @@ -601,8 +601,8 @@ trackedFiles: pristine_git_object: 4caff4a6b74aeb322bf42cd2070b7bd576ca834a src/mistralai_azure/ocr.py: id: 77e2e0f594ad - last_write_checksum: sha1:a455095c62c2dfad071d70682c2f57e7d64934db - pristine_git_object: da823f816dda9d462a795e9b946d5634ff6d48e2 + last_write_checksum: sha1:7daae9b0c14093d6d0bc0258b0bce008cb845a1e + pristine_git_object: 31e27f6eaa6dcc2b8450656d4a59dd4a7a50a29a src/mistralai_azure/py.typed: id: 98df238e554c last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 diff --git a/packages/mistralai_azure/docs/models/mistralpromptmode.md b/packages/mistralai_azure/docs/models/mistralpromptmode.md index 7416e203..c3409d03 100644 --- a/packages/mistralai_azure/docs/models/mistralpromptmode.md +++ b/packages/mistralai_azure/docs/models/mistralpromptmode.md @@ -1,5 +1,9 @@ # MistralPromptMode +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
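The docstring added above describes the `prompt_mode` argument on chat completions, with `"reasoning"` as the only recognized literal. A minimal sketch, assuming `chat.complete` accepts the argument as the docstring describes; the model name is illustrative:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # "reasoning" expresses high-level intent; the concrete system prompt
    # is assigned server-side, per the docstring above.
    res = mistral.chat.complete(
        model="magistral-small-latest",  # illustrative model name
        messages=[{"role": "user", "content": "What is 17 * 24?"}],
        prompt_mode="reasoning",
    )
    print(res.choices[0].message.content)
```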
+ ## Values diff --git a/packages/mistralai_azure/docs/models/ocrrequest.md b/packages/mistralai_azure/docs/models/ocrrequest.md index 76e4da92..87929e53 100644 --- a/packages/mistralai_azure/docs/models/ocrrequest.md +++ b/packages/mistralai_azure/docs/models/ocrrequest.md @@ -14,6 +14,7 @@ | `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | | `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | | `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | | `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | | `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py index 22fb6438..77230b7e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py @@ -6,3 +6,7 @@ MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. +""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py index 565a0a30..e9c23afc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py @@ -51,6 +51,8 @@ class OCRRequestTypedDict(TypedDict): r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + document_annotation_prompt: NotRequired[Nullable[str]] + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" table_format: NotRequired[Nullable[TableFormat]] extract_header: NotRequired[bool] extract_footer: NotRequired[bool] @@ -82,6 +84,9 @@ class OCRRequest(BaseModel): document_annotation_format: OptionalNullable[ResponseFormat] = UNSET r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + document_annotation_prompt: OptionalNullable[str] = UNSET + r"""Optional prompt to guide the model in extracting structured output from the entire document. 
A document_annotation_format must be provided.""" + table_format: OptionalNullable[TableFormat] = UNSET extract_header: Optional[bool] = None @@ -98,6 +103,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "document_annotation_prompt", "table_format", "extract_header", "extract_footer", @@ -110,6 +116,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "document_annotation_prompt", "table_format", ] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/ocr.py b/packages/mistralai_azure/src/mistralai_azure/ocr.py index da823f81..31e27f6e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/ocr.py +++ b/packages/mistralai_azure/src/mistralai_azure/ocr.py @@ -25,6 +25,7 @@ def process( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, table_format: OptionalNullable[models.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, @@ -44,6 +45,7 @@ def process( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. :param table_format: :param extract_header: :param extract_footer: @@ -76,6 +78,7 @@ def process( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + document_annotation_prompt=document_annotation_prompt, table_format=table_format, extract_header=extract_header, extract_footer=extract_footer, @@ -155,6 +158,7 @@ async def process_async( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, table_format: OptionalNullable[models.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, @@ -174,6 +178,7 @@ async def process_async( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. 
:param table_format: :param extract_header: :param extract_footer: @@ -206,6 +211,7 @@ async def process_async( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + document_annotation_prompt=document_annotation_prompt, table_format=table_format, extract_header=extract_header, extract_footer=extract_footer, diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 0bf3209f..31eb1bc7 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,7 +1,7 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: a7d9a161ca71328c62514af87c72bd88 + docChecksum: d91fd326da9118e6c9dddea48eaf47a7 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 @@ -12,9 +12,9 @@ management: installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_gcp published: true persistentEdits: - generation_id: 749d4ba0-3c79-459a-a407-b84537a057da - pristine_commit_hash: ae909165077818f36014ef4d28edaa3572c8cc64 - pristine_tree_hash: f04041c3f961a8702dfa1eaa1185b1b605875f82 + generation_id: e1cf1217-2a08-4cb8-b92c-542b4f885caa + pristine_commit_hash: 57fe0df69b76fe4754f039d49f7c40770fb3097d + pristine_tree_hash: c4c1037865fb86650ada485b300f96784045922f features: python: additionalDependencies: 1.0.0 @@ -197,8 +197,8 @@ trackedFiles: pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/mistralpromptmode.md: id: d17d5db4d3b6 - last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 - pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 docs/models/prediction.md: id: 3c70b2262201 last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 @@ -461,8 +461,8 @@ trackedFiles: pristine_git_object: fec729a590b2ea981e01f4af99d8b36ba52b4664 src/mistralai_gcp/models/mistralpromptmode.py: id: d2ba58ed5184 - last_write_checksum: sha1:8518548e80dcd8798ee72c2557c473327ba9289b - pristine_git_object: 1440f6ea9d18139ce5f10eb38d951b0995f74a20 + last_write_checksum: sha1:6fb8323de88682846a2a09e68550f3508a29f1f5 + pristine_git_object: a5cc534f8c53bc87b8451aac1b2a79e695530e71 src/mistralai_gcp/models/no_response_error.py: id: 7a773ba0687f last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f diff --git a/packages/mistralai_gcp/docs/models/mistralpromptmode.md b/packages/mistralai_gcp/docs/models/mistralpromptmode.md index 7416e203..c3409d03 100644 --- a/packages/mistralai_gcp/docs/models/mistralpromptmode.md +++ b/packages/mistralai_gcp/docs/models/mistralpromptmode.md @@ -1,5 +1,9 @@ # MistralPromptMode +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
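Returning to the OCR changes earlier in this patch: `document_annotation_prompt` is only meaningful alongside a `document_annotation_format`, which must be a `json_schema` response format. A hedged sketch against the top-level client, assuming it exposes the same `ocr.process` parameters as the package hunks above; the document URL, the schema contents, and the exact key layout of the `json_schema` payload are assumptions:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    res = mistral.ocr.process(
        model="mistral-ocr-latest",
        document={
            "type": "document_url",
            "document_url": "https://example.com/invoice.pdf",  # placeholder
        },
        # Only json_schema is valid for this field; the payload shape below
        # is an assumption.
        document_annotation_format={
            "type": "json_schema",
            "json_schema": {
                "name": "invoice_summary",
                "schema": {
                    "type": "object",
                    "properties": {"total": {"type": "number"}},
                },
            },
        },
        # Guides extraction; requires document_annotation_format above.
        document_annotation_prompt="Extract the invoice total from the document.",
    )
    print(res.document_annotation)
```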
+ ## Values diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py index 1440f6ea..a5cc534f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py @@ -6,3 +6,7 @@ MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. +""" diff --git a/pyproject.toml b/pyproject.toml index 5802feaa..f8006e7d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0a2" +version = "2.0.0a3" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 40ff2c8f..22fc94e5 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -29,7 +29,10 @@ done exclude_files=( "examples/mistral/chat/chatbot_with_streaming.py" "examples/mistral/agents/async_conversation_run_mcp_remote_auth.py" - "examples/mistral/jobs/async_jobs_chat.py" + "examples/mistral/jobs/async_fine_tuning_chat.py" + "examples/mistral/jobs/async_fine_tuning.py" + "examples/mistral/jobs/fine_tuning.py" + "examples/mistral/jobs/fine_tuning_dry_run.py" "examples/mistral/classifier/async_classifier.py" "examples/mistral/mcp_servers/sse_server.py" "examples/mistral/mcp_servers/stdio_server.py" diff --git a/src/mistralai/client/__init__.py b/src/mistralai/client/__init__.py index dd02e42e..481fc916 100644 --- a/src/mistralai/client/__init__.py +++ b/src/mistralai/client/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f1b791f9d2a5 from ._version import ( __title__, diff --git a/src/mistralai/client/_hooks/__init__.py b/src/mistralai/client/_hooks/__init__.py index 2ee66cdd..66a04e37 100644 --- a/src/mistralai/client/_hooks/__init__.py +++ b/src/mistralai/client/_hooks/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cef9ff97efd7 from .sdkhooks import * from .types import * diff --git a/src/mistralai/client/_hooks/sdkhooks.py b/src/mistralai/client/_hooks/sdkhooks.py index c9318db4..ecf94240 100644 --- a/src/mistralai/client/_hooks/sdkhooks.py +++ b/src/mistralai/client/_hooks/sdkhooks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ed1e485b2153 import httpx from .types import ( diff --git a/src/mistralai/client/_hooks/types.py b/src/mistralai/client/_hooks/types.py index e7e1bb7f..036d44b8 100644 --- a/src/mistralai/client/_hooks/types.py +++ b/src/mistralai/client/_hooks/types.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 85cfedfb7582 from abc import ABC, abstractmethod import httpx diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 5a7296a7..814d9ec7 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -1,12 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cc807b30de19 import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0a2" +__version__: str = "2.0.0a3" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a2 2.794.1 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a3 2.794.1 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/accesses.py b/src/mistralai/client/accesses.py index 307c7156..cda484c8 100644 --- a/src/mistralai/client/accesses.py +++ b/src/mistralai/client/accesses.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 76fc53bfcf59 from .basesdk import BaseSDK from mistralai.client import models, utils @@ -45,7 +46,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareListV1Request( + request = models.ListLibraryAccessesRequest( library_id=library_id, ) @@ -78,7 +79,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_list_v1", + operation_id="ListLibraryAccesses", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -135,7 +136,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareListV1Request( + request = models.ListLibraryAccessesRequest( library_id=library_id, ) @@ -168,7 +169,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_list_v1", + operation_id="ListLibraryAccesses", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -233,7 +234,7 @@ def update_or_create( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareCreateV1Request( + request = models.UpdateOrCreateLibraryAccessRequest( library_id=library_id, sharing_in=models.SharingIn( org_id=org_id, @@ -275,7 +276,7 @@ def update_or_create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_create_v1", + operation_id="UpdateOrCreateLibraryAccess", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -340,7 +341,7 @@ async def update_or_create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareCreateV1Request( + request = models.UpdateOrCreateLibraryAccessRequest( library_id=library_id, sharing_in=models.SharingIn( org_id=org_id, @@ -382,7 +383,7 @@ async def update_or_create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_create_v1", + operation_id="UpdateOrCreateLibraryAccess", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -445,7 +446,7 @@ def delete( else: base_url = self._get_url(base_url, 
url_variables) - request = models.LibrariesShareDeleteV1Request( + request = models.DeleteLibraryAccessRequest( library_id=library_id, sharing_delete=models.SharingDelete( org_id=org_id, @@ -486,7 +487,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_delete_v1", + operation_id="DeleteLibraryAccess", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -549,7 +550,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareDeleteV1Request( + request = models.DeleteLibraryAccessRequest( library_id=library_id, sharing_delete=models.SharingDelete( org_id=org_id, @@ -590,7 +591,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_delete_v1", + operation_id="DeleteLibraryAccess", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py index d0da9f07..0942cb20 100644 --- a/src/mistralai/client/agents.py +++ b/src/mistralai/client/agents.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e946546e3eaa from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py index 2834ade2..f68f063c 100644 --- a/src/mistralai/client/audio.py +++ b/src/mistralai/client/audio.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7a8ed2e90d61 from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration diff --git a/src/mistralai/client/basesdk.py b/src/mistralai/client/basesdk.py index bddc9012..611b4059 100644 --- a/src/mistralai/client/basesdk.py +++ b/src/mistralai/client/basesdk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7518c67b81ea from .sdkconfiguration import SDKConfiguration import httpx diff --git a/src/mistralai/client/batch.py b/src/mistralai/client/batch.py index 586dc235..7e36fd0d 100644 --- a/src/mistralai/client/batch.py +++ b/src/mistralai/client/batch.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cffe114c7ac7 from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration diff --git a/src/mistralai/client/batch_jobs.py b/src/mistralai/client/batch_jobs.py index af8d97b2..752c7652 100644 --- a/src/mistralai/client/batch_jobs.py +++ b/src/mistralai/client/batch_jobs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3423fec25840 from .basesdk import BaseSDK from datetime import datetime @@ -8,6 +9,7 @@ apiendpoint as models_apiendpoint, batchjobstatus as models_batchjobstatus, batchrequest as models_batchrequest, + listbatchjobsop as models_listbatchjobsop, ) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env @@ -27,6 +29,7 @@ def list( created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, + order_by: Optional[models_listbatchjobsop.OrderBy] = "-created", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -44,6 +47,7 @@ def list( :param created_after: :param created_by_me: :param status: + :param order_by: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -59,7 +63,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + request = models.ListBatchJobsRequest( page=page, page_size=page_size, model=model, @@ -68,6 +72,7 @@ def list( created_after=created_after, created_by_me=created_by_me, status=status, + order_by=order_by, ) req = self._build_request( @@ -99,7 +104,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", + operation_id="ListBatchJobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -132,6 +137,7 @@ async def list_async( created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, + order_by: Optional[models_listbatchjobsop.OrderBy] = "-created", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -149,6 +155,7 @@ async def list_async( :param created_after: :param created_by_me: :param status: + :param order_by: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -164,7 +171,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + request = models.ListBatchJobsRequest( page=page, page_size=page_size, model=model, @@ -173,6 +180,7 @@ async def list_async( created_after=created_after, created_by_me=created_by_me, status=status, + order_by=order_by, ) req = self._build_request_async( @@ -204,7 +212,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", + operation_id="ListBatchJobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -316,7 +324,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", + operation_id="CreateBatchJob", oauth2_scopes=None, 
security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -428,7 +436,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", + operation_id="CreateBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -484,7 +492,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchGetBatchJobRequest( + request = models.GetBatchJobRequest( job_id=job_id, inline=inline, ) @@ -518,7 +526,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", + operation_id="GetBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -574,7 +582,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchGetBatchJobRequest( + request = models.GetBatchJobRequest( job_id=job_id, inline=inline, ) @@ -608,7 +616,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", + operation_id="GetBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -659,7 +667,7 @@ def cancel( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + request = models.CancelBatchJobRequest( job_id=job_id, ) @@ -692,7 +700,7 @@ def cancel( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", + operation_id="CancelBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -743,7 +751,7 @@ async def cancel_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + request = models.CancelBatchJobRequest( job_id=job_id, ) @@ -776,7 +784,7 @@ async def cancel_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", + operation_id="CancelBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/beta.py b/src/mistralai/client/beta.py index a1bd409e..65b761d1 100644 --- a/src/mistralai/client/beta.py +++ b/src/mistralai/client/beta.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 981417f45147 from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration diff --git a/src/mistralai/client/beta_agents.py b/src/mistralai/client/beta_agents.py index 1420895e..4e692f17 100644 --- a/src/mistralai/client/beta_agents.py +++ b/src/mistralai/client/beta_agents.py @@ -1,13 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b64ad29b7174 from .basesdk import BaseSDK from mistralai.client import models, utils from mistralai.client._hooks import HookContext from mistralai.client.models import ( agentcreationrequest as models_agentcreationrequest, - agents_api_v1_agents_getop as models_agents_api_v1_agents_getop, agentupdaterequest as models_agentupdaterequest, completionargs as models_completionargs, + getagentop as models_getagentop, requestsource as models_requestsource, ) from mistralai.client.types import OptionalNullable, UNSET @@ -40,6 +41,7 @@ def create( description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -57,6 +59,7 @@ def create( :param description: :param handoffs: :param metadata: + :param version_message: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -85,6 +88,7 @@ def create( description=description, handoffs=handoffs, metadata=metadata, + version_message=version_message, ) req = self._build_request( @@ -119,7 +123,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_create", + operation_id="CreateAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -168,6 +172,7 @@ async def create_async( description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -185,6 +190,7 @@ async def create_async( :param description: :param handoffs: :param metadata: + :param version_message: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -213,6 +219,7 @@ async def create_async( description=description, handoffs=handoffs, metadata=metadata, + version_message=version_message, ) req = self._build_request_async( @@ -247,7 +254,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_create", + operation_id="CreateAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -283,6 +290,7 @@ def list( deployment_chat: OptionalNullable[bool] = UNSET, sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, + search: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -298,7 +306,8 @@ def list( :param page_size: Number of agents per page :param deployment_chat: :param sources: - :param name: + :param name: Filter by agent name + :param search: Search agents by name or ID :param id: :param metadata: :param retries: Override the 
default retry configuration for this method @@ -316,12 +325,13 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListRequest( + request = models.ListAgentsRequest( page=page, page_size=page_size, deployment_chat=deployment_chat, sources=sources, name=name, + search=search, id=id, metadata=metadata, ) @@ -355,7 +365,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list", + operation_id="ListAgents", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -391,6 +401,7 @@ async def list_async( deployment_chat: OptionalNullable[bool] = UNSET, sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, + search: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -406,7 +417,8 @@ async def list_async( :param page_size: Number of agents per page :param deployment_chat: :param sources: - :param name: + :param name: Filter by agent name + :param search: Search agents by name or ID :param id: :param metadata: :param retries: Override the default retry configuration for this method @@ -424,12 +436,13 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListRequest( + request = models.ListAgentsRequest( page=page, page_size=page_size, deployment_chat=deployment_chat, sources=sources, name=name, + search=search, id=id, metadata=metadata, ) @@ -463,7 +476,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list", + operation_id="ListAgents", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -497,8 +510,8 @@ def get( agent_id: str, agent_version: OptionalNullable[ Union[ - models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersion, - models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersionTypedDict, + models_getagentop.GetAgentAgentVersion, + models_getagentop.GetAgentAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -527,7 +540,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsGetRequest( + request = models.GetAgentRequest( agent_id=agent_id, agent_version=agent_version, ) @@ -561,7 +574,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_get", + operation_id="GetAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -595,8 +608,8 @@ async def get_async( agent_id: str, agent_version: OptionalNullable[ Union[ - models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersion, - models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersionTypedDict, + models_getagentop.GetAgentAgentVersion, + models_getagentop.GetAgentAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -625,7 +638,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsGetRequest( + request = models.GetAgentRequest( agent_id=agent_id, agent_version=agent_version, ) @@ -659,7 +672,7 @@ async def 
get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_get", + operation_id="GetAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -710,6 +723,7 @@ def update( handoffs: OptionalNullable[List[str]] = UNSET, deployment_chat: OptionalNullable[bool] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -729,6 +743,7 @@ def update( :param handoffs: :param deployment_chat: :param metadata: + :param version_message: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -744,7 +759,7 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsUpdateRequest( + request = models.UpdateAgentRequest( agent_id=agent_id, agent_update_request=models.AgentUpdateRequest( instructions=instructions, @@ -760,6 +775,7 @@ def update( handoffs=handoffs, deployment_chat=deployment_chat, metadata=metadata, + version_message=version_message, ), ) @@ -799,7 +815,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_update", + operation_id="UpdateAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -850,6 +866,7 @@ async def update_async( handoffs: OptionalNullable[List[str]] = UNSET, deployment_chat: OptionalNullable[bool] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -869,6 +886,7 @@ async def update_async( :param handoffs: :param deployment_chat: :param metadata: + :param version_message: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -884,7 +902,7 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsUpdateRequest( + request = models.UpdateAgentRequest( agent_id=agent_id, agent_update_request=models.AgentUpdateRequest( instructions=instructions, @@ -900,6 +918,7 @@ async def update_async( handoffs=handoffs, deployment_chat=deployment_chat, metadata=metadata, + version_message=version_message, ), ) @@ -939,7 +958,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_update", + operation_id="UpdateAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -994,7 +1013,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsDeleteRequest( + request = models.DeleteAgentRequest( agent_id=agent_id, ) @@ -1027,7 +1046,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_delete", + operation_id="DeleteAgent", 
oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1082,7 +1101,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsDeleteRequest( + request = models.DeleteAgentRequest( agent_id=agent_id, ) @@ -1115,7 +1134,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_delete", + operation_id="DeleteAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1174,7 +1193,7 @@ def update_version( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsUpdateVersionRequest( + request = models.UpdateAgentVersionRequest( agent_id=agent_id, version=version, ) @@ -1208,7 +1227,7 @@ def update_version( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", + operation_id="UpdateAgentVersion", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1267,7 +1286,7 @@ async def update_version_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsUpdateVersionRequest( + request = models.UpdateAgentVersionRequest( agent_id=agent_id, version=version, ) @@ -1301,7 +1320,7 @@ async def update_version_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", + operation_id="UpdateAgentVersion", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1362,7 +1381,7 @@ def list_versions( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListVersionsRequest( + request = models.ListAgentVersionsRequest( agent_id=agent_id, page=page, page_size=page_size, @@ -1397,7 +1416,7 @@ def list_versions( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list_versions", + operation_id="ListAgentVersions", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1458,7 +1477,7 @@ async def list_versions_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListVersionsRequest( + request = models.ListAgentVersionsRequest( agent_id=agent_id, page=page, page_size=page_size, @@ -1493,7 +1512,7 @@ async def list_versions_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list_versions", + operation_id="ListAgentVersions", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1552,7 +1571,7 @@ def get_version( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsGetVersionRequest( + request = models.GetAgentVersionRequest( agent_id=agent_id, version=version, ) @@ -1586,7 +1605,7 @@ def get_version( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_get_version", + operation_id="GetAgentVersion", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1645,7 +1664,7 @@ async def get_version_async( else: base_url = 
self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsGetVersionRequest( + request = models.GetAgentVersionRequest( agent_id=agent_id, version=version, ) @@ -1679,7 +1698,7 @@ async def get_version_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_get_version", + operation_id="GetAgentVersion", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1740,7 +1759,7 @@ def create_version_alias( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + request = models.CreateOrUpdateAgentAliasRequest( agent_id=agent_id, alias=alias, version=version, @@ -1775,7 +1794,7 @@ def create_version_alias( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_create_or_update_alias", + operation_id="CreateOrUpdateAgentAlias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1836,7 +1855,7 @@ async def create_version_alias_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + request = models.CreateOrUpdateAgentAliasRequest( agent_id=agent_id, alias=alias, version=version, @@ -1871,7 +1890,7 @@ async def create_version_alias_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_create_or_update_alias", + operation_id="CreateOrUpdateAgentAlias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1928,7 +1947,7 @@ def list_version_aliases( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + request = models.ListAgentAliasesRequest( agent_id=agent_id, ) @@ -1961,7 +1980,7 @@ def list_version_aliases( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list_version_aliases", + operation_id="ListAgentAliases", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2018,7 +2037,7 @@ async def list_version_aliases_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + request = models.ListAgentAliasesRequest( agent_id=agent_id, ) @@ -2051,7 +2070,7 @@ async def list_version_aliases_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list_version_aliases", + operation_id="ListAgentAliases", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2078,3 +2097,189 @@ async def list_version_aliases_async( raise models.SDKError("API error occurred", http_res, http_res_text) raise models.SDKError("Unexpected response received", http_res) + + def delete_version_alias( + self, + *, + agent_id: str, + alias: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent version alias. + + Delete an existing alias for an agent. 
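# --- Illustrative usage sketch (not part of the generated diff) ---
# A minimal example of the agents surface after these renames, assuming the public
# client exposes this module as `client.beta.agents` as in the published SDK. The
# API key, model identifier, agent name, alias, and version number are hypothetical
# placeholders; only the parameter names come from this patch.
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")

# `version_message` is the new optional field on create()/update().
agent = client.beta.agents.create(
    model="mistral-medium-latest",        # assumed model identifier
    name="support-agent",
    instructions="Answer support questions.",
    version_message="initial version",    # new in this change
)

# `search` is the new query parameter on list(); `name` still filters by agent name.
agents = client.beta.agents.list(search="support", page=0, page_size=20)

# Manage version aliases, including the delete_version_alias() method added here.
client.beta.agents.create_version_alias(agent_id=agent.id, alias="prod", version=0)
client.beta.agents.delete_version_alias(agent_id=agent.id, alias="prod")
# --- end of sketch ---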
+ + :param agent_id: + :param alias: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteAgentAliasRequest( + agent_id=agent_id, + alias=alias, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="DeleteAgentAlias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_version_alias_async( + self, + *, + agent_id: str, + alias: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent version alias. + + Delete an existing alias for an agent. + + :param agent_id: + :param alias: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteAgentAliasRequest( + agent_id=agent_id, + alias=alias, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="DeleteAgentAlias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 523e3340..35698d32 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7eba0f088d47 from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py index 327653d1..3407c4b7 100644 --- a/src/mistralai/client/classifiers.py +++ b/src/mistralai/client/classifiers.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26e773725732 from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index aa037bd2..646b91f3 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 40692a878064 from .basesdk import BaseSDK from mistralai.client import models, utils @@ -60,7 +61,10 @@ async def run_async( inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], instructions: OptionalNullable[str] = UNSET, tools: OptionalNullable[ - Union[List[models.ConversationRequestTool], List[models.ConversationRequestToolTypedDict]] + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] ] = UNSET, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] @@ -133,7 +137,10 @@ async def run_stream_async( inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], instructions: OptionalNullable[str] = UNSET, tools: OptionalNullable[ - Union[List[models.ConversationRequestTool], List[models.ConversationRequestToolTypedDict]] + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] ] = UNSET, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] @@ -353,7 +360,7 @@ def start( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", + operation_id="StartConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -506,7 +513,7 @@ async def start_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", + operation_id="StartConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -544,7 +551,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.AgentsAPIV1ConversationsListResponse]: + ) -> List[models.ListConversationsResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. @@ -567,7 +574,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsListRequest( + request = models.ListConversationsRequest( page=page, page_size=page_size, metadata=metadata, @@ -602,7 +609,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", + operation_id="ListConversations", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -616,7 +623,7 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - List[models.AgentsAPIV1ConversationsListResponse], http_res + List[models.ListConversationsResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( @@ -642,7 +649,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.AgentsAPIV1ConversationsListResponse]: + ) -> List[models.ListConversationsResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. 
@@ -665,7 +672,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsListRequest( + request = models.ListConversationsRequest( page=page, page_size=page_size, metadata=metadata, @@ -700,7 +707,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", + operation_id="ListConversations", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -714,7 +721,7 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - List[models.AgentsAPIV1ConversationsListResponse], http_res + List[models.ListConversationsResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( @@ -759,7 +766,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsGetRequest( + request = models.GetConversationRequest( conversation_id=conversation_id, ) @@ -792,7 +799,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", + operation_id="GetConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -849,7 +856,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsGetRequest( + request = models.GetConversationRequest( conversation_id=conversation_id, ) @@ -882,7 +889,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", + operation_id="GetConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -939,7 +946,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsDeleteRequest( + request = models.DeleteConversationRequest( conversation_id=conversation_id, ) @@ -972,7 +979,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_delete", + operation_id="DeleteConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1029,7 +1036,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsDeleteRequest( + request = models.DeleteConversationRequest( conversation_id=conversation_id, ) @@ -1062,7 +1069,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_delete", + operation_id="DeleteConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1139,7 +1146,7 @@ def append( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsAppendRequest( + request = models.AppendConversationRequest( conversation_id=conversation_id, conversation_append_request=models.ConversationAppendRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -1188,7 +1195,7 @@ def append( hook_ctx=HookContext( config=self.sdk_configuration, 
base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", + operation_id="AppendConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1265,7 +1272,7 @@ async def append_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsAppendRequest( + request = models.AppendConversationRequest( conversation_id=conversation_id, conversation_append_request=models.ConversationAppendRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -1314,7 +1321,7 @@ async def append_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", + operation_id="AppendConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1371,7 +1378,7 @@ def get_history( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsHistoryRequest( + request = models.GetConversationHistoryRequest( conversation_id=conversation_id, ) @@ -1404,7 +1411,7 @@ def get_history( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_history", + operation_id="GetConversationHistory", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1461,7 +1468,7 @@ async def get_history_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsHistoryRequest( + request = models.GetConversationHistoryRequest( conversation_id=conversation_id, ) @@ -1494,7 +1501,7 @@ async def get_history_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_history", + operation_id="GetConversationHistory", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1551,7 +1558,7 @@ def get_messages( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsMessagesRequest( + request = models.GetConversationMessagesRequest( conversation_id=conversation_id, ) @@ -1584,7 +1591,7 @@ def get_messages( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_messages", + operation_id="GetConversationMessages", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1641,7 +1648,7 @@ async def get_messages_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsMessagesRequest( + request = models.GetConversationMessagesRequest( conversation_id=conversation_id, ) @@ -1674,7 +1681,7 @@ async def get_messages_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_messages", + operation_id="GetConversationMessages", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1762,7 +1769,7 @@ def restart( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsRestartRequest( + request = models.RestartConversationRequest( conversation_id=conversation_id, conversation_restart_request=models.ConversationRestartRequest( inputs=utils.get_pydantic_model(inputs, 
models.ConversationInputs), @@ -1814,7 +1821,7 @@ def restart( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart", + operation_id="RestartConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1902,7 +1909,7 @@ async def restart_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsRestartRequest( + request = models.RestartConversationRequest( conversation_id=conversation_id, conversation_restart_request=models.ConversationRestartRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -1954,7 +1961,7 @@ async def restart_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart", + operation_id="RestartConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2109,7 +2116,7 @@ def start_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", + operation_id="StartConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2271,7 +2278,7 @@ async def start_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", + operation_id="StartConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2355,7 +2362,7 @@ def append_stream( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsAppendStreamRequest( + request = models.AppendConversationStreamRequest( conversation_id=conversation_id, conversation_append_stream_request=models.ConversationAppendStreamRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -2404,7 +2411,7 @@ def append_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", + operation_id="AppendConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2488,7 +2495,7 @@ async def append_stream_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsAppendStreamRequest( + request = models.AppendConversationStreamRequest( conversation_id=conversation_id, conversation_append_stream_request=models.ConversationAppendStreamRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -2537,7 +2544,7 @@ async def append_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", + operation_id="AppendConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2632,7 +2639,7 @@ def restart_stream( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsRestartStreamRequest( + request = models.RestartConversationStreamRequest( conversation_id=conversation_id, conversation_restart_stream_request=models.ConversationRestartStreamRequest( inputs=utils.get_pydantic_model(inputs, 
models.ConversationInputs), @@ -2684,7 +2691,7 @@ def restart_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", + operation_id="RestartConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2779,7 +2786,7 @@ async def restart_stream_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsRestartStreamRequest( + request = models.RestartConversationStreamRequest( conversation_id=conversation_id, conversation_restart_stream_request=models.ConversationRestartStreamRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -2831,7 +2838,7 @@ async def restart_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", + operation_id="RestartConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py index 3316e63b..c78f2944 100644 --- a/src/mistralai/client/documents.py +++ b/src/mistralai/client/documents.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bcc17286c31c from .basesdk import BaseSDK from mistralai.client import models, utils @@ -57,7 +58,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsListV1Request( + request = models.ListDocumentsRequest( library_id=library_id, search=search, page_size=page_size, @@ -96,7 +97,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_list_v1", + operation_id="ListDocuments", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -165,7 +166,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsListV1Request( + request = models.ListDocumentsRequest( library_id=library_id, search=search, page_size=page_size, @@ -204,7 +205,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_list_v1", + operation_id="ListDocuments", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -271,7 +272,7 @@ def upload( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsUploadV1Request( + request = models.UploadDocumentRequest( library_id=library_id, request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), @@ -310,7 +311,7 @@ def upload( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_upload_v1", + operation_id="UploadDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -377,7 +378,7 @@ async def upload_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsUploadV1Request( + request = models.UploadDocumentRequest( library_id=library_id, request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), @@ -416,7 +417,7 @@ async def 
upload_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_upload_v1", + operation_id="UploadDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -475,7 +476,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetV1Request( + request = models.GetDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -509,7 +510,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_v1", + operation_id="GetDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -568,7 +569,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetV1Request( + request = models.GetDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -602,7 +603,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_v1", + operation_id="GetDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -670,7 +671,7 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsUpdateV1Request( + request = models.UpdateDocumentRequest( library_id=library_id, document_id=document_id, document_update_in=models.DocumentUpdateIn( @@ -715,7 +716,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_update_v1", + operation_id="UpdateDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -783,7 +784,7 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsUpdateV1Request( + request = models.UpdateDocumentRequest( library_id=library_id, document_id=document_id, document_update_in=models.DocumentUpdateIn( @@ -828,7 +829,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_update_v1", + operation_id="UpdateDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -887,7 +888,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsDeleteV1Request( + request = models.DeleteDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -921,7 +922,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_delete_v1", + operation_id="DeleteDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -980,7 +981,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsDeleteV1Request( + request = models.DeleteDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -1014,7 +1015,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_delete_v1", + operation_id="DeleteDocument", oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security @@ -1073,7 +1074,7 @@ def text_content( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetTextContentV1Request( + request = models.GetDocumentTextContentRequest( library_id=library_id, document_id=document_id, ) @@ -1107,7 +1108,7 @@ def text_content( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_text_content_v1", + operation_id="GetDocumentTextContent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1166,7 +1167,7 @@ async def text_content_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetTextContentV1Request( + request = models.GetDocumentTextContentRequest( library_id=library_id, document_id=document_id, ) @@ -1200,7 +1201,7 @@ async def text_content_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_text_content_v1", + operation_id="GetDocumentTextContent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1259,7 +1260,7 @@ def status( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetStatusV1Request( + request = models.GetDocumentStatusRequest( library_id=library_id, document_id=document_id, ) @@ -1293,7 +1294,7 @@ def status( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_status_v1", + operation_id="GetDocumentStatus", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1352,7 +1353,7 @@ async def status_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetStatusV1Request( + request = models.GetDocumentStatusRequest( library_id=library_id, document_id=document_id, ) @@ -1386,7 +1387,7 @@ async def status_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_status_v1", + operation_id="GetDocumentStatus", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1445,7 +1446,7 @@ def get_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetSignedURLV1Request( + request = models.GetDocumentSignedURLRequest( library_id=library_id, document_id=document_id, ) @@ -1479,7 +1480,7 @@ def get_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_signed_url_v1", + operation_id="GetDocumentSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1538,7 +1539,7 @@ async def get_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetSignedURLV1Request( + request = models.GetDocumentSignedURLRequest( library_id=library_id, document_id=document_id, ) @@ -1572,7 +1573,7 @@ async def get_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_signed_url_v1", + operation_id="GetDocumentSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ 
-1631,7 +1632,7 @@ def extracted_text_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + request = models.GetDocumentExtractedTextSignedURLRequest( library_id=library_id, document_id=document_id, ) @@ -1665,7 +1666,7 @@ def extracted_text_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_extracted_text_signed_url_v1", + operation_id="GetDocumentExtractedTextSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1724,7 +1725,7 @@ async def extracted_text_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + request = models.GetDocumentExtractedTextSignedURLRequest( library_id=library_id, document_id=document_id, ) @@ -1758,7 +1759,7 @@ async def extracted_text_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_extracted_text_signed_url_v1", + operation_id="GetDocumentExtractedTextSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1817,7 +1818,7 @@ def reprocess( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsReprocessV1Request( + request = models.ReprocessDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -1851,7 +1852,7 @@ def reprocess( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_reprocess_v1", + operation_id="ReprocessDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1910,7 +1911,7 @@ async def reprocess_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsReprocessV1Request( + request = models.ReprocessDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -1944,7 +1945,7 @@ async def reprocess_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_reprocess_v1", + operation_id="ReprocessDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py index 359f2f62..4a056baa 100644 --- a/src/mistralai/client/embeddings.py +++ b/src/mistralai/client/embeddings.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f9c17258207e from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py index b384cda4..57d389f1 100644 --- a/src/mistralai/client/files.py +++ b/src/mistralai/client/files.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f12df4b2ce43 from .basesdk import BaseSDK import httpx @@ -99,7 +100,7 @@ def upload( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_upload_file", + operation_id="UploadFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -201,7 +202,7 @@ async def upload_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_upload_file", + operation_id="UploadFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -266,7 +267,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesListFilesRequest( + request = models.ListFilesRequest( page=page, page_size=page_size, include_total=include_total, @@ -306,7 +307,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_list_files", + operation_id="ListFiles", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -371,7 +372,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesListFilesRequest( + request = models.ListFilesRequest( page=page, page_size=page_size, include_total=include_total, @@ -411,7 +412,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_list_files", + operation_id="ListFiles", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -462,7 +463,7 @@ def retrieve( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesRetrieveFileRequest( + request = models.RetrieveFileRequest( file_id=file_id, ) @@ -495,7 +496,7 @@ def retrieve( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_retrieve_file", + operation_id="RetrieveFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -546,7 +547,7 @@ async def retrieve_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesRetrieveFileRequest( + request = models.RetrieveFileRequest( file_id=file_id, ) @@ -579,7 +580,7 @@ async def retrieve_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_retrieve_file", + operation_id="RetrieveFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -630,7 +631,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesDeleteFileRequest( + request = models.DeleteFileRequest( file_id=file_id, ) @@ -663,7 +664,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_delete_file", + operation_id="DeleteFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -714,7 +715,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesDeleteFileRequest( + request = models.DeleteFileRequest( file_id=file_id, ) @@ -747,7 +748,7 @@ async def delete_async( 
hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_delete_file", + operation_id="DeleteFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -798,7 +799,7 @@ def download( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesDownloadFileRequest( + request = models.DownloadFileRequest( file_id=file_id, ) @@ -831,7 +832,7 @@ def download( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_download_file", + operation_id="DownloadFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -884,7 +885,7 @@ async def download_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesDownloadFileRequest( + request = models.DownloadFileRequest( file_id=file_id, ) @@ -917,7 +918,7 @@ async def download_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_download_file", + operation_id="DownloadFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -970,7 +971,7 @@ def get_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesGetSignedURLRequest( + request = models.GetFileSignedURLRequest( file_id=file_id, expiry=expiry, ) @@ -1004,7 +1005,7 @@ def get_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_get_signed_url", + operation_id="GetFileSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1055,7 +1056,7 @@ async def get_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesGetSignedURLRequest( + request = models.GetFileSignedURLRequest( file_id=file_id, expiry=expiry, ) @@ -1089,7 +1090,7 @@ async def get_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_get_signed_url", + operation_id="GetFileSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/fim.py b/src/mistralai/client/fim.py index 4a834fe9..be3f7742 100644 --- a/src/mistralai/client/fim.py +++ b/src/mistralai/client/fim.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 217bea5d701d from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/fine_tuning.py b/src/mistralai/client/fine_tuning.py index aeb832d4..df6bc564 100644 --- a/src/mistralai/client/fine_tuning.py +++ b/src/mistralai/client/fine_tuning.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5d5079bbd54e from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration diff --git a/src/mistralai/client/fine_tuning_jobs.py b/src/mistralai/client/fine_tuning_jobs.py index fb75e8c7..9a28ded1 100644 --- a/src/mistralai/client/fine_tuning_jobs.py +++ b/src/mistralai/client/fine_tuning_jobs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fa1ea246e0b2 from .basesdk import BaseSDK from datetime import datetime @@ -8,7 +9,7 @@ classifiertargetin as models_classifiertargetin, finetuneablemodeltype as models_finetuneablemodeltype, jobin as models_jobin, - jobs_api_routes_fine_tuning_get_fine_tuning_jobsop as models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop, + listfinetuningjobsop as models_listfinetuningjobsop, trainingfile as models_trainingfile, ) from mistralai.client.types import OptionalNullable, UNSET @@ -28,7 +29,7 @@ def list( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.JobsAPIRoutesFineTuningGetFineTuningJobsStatus + models_listfinetuningjobsop.ListFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -67,7 +68,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + request = models.ListFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -109,7 +110,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + operation_id="ListFineTuningJobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -141,7 +142,7 @@ async def list_async( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.JobsAPIRoutesFineTuningGetFineTuningJobsStatus + models_listfinetuningjobsop.ListFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -180,7 +181,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + request = models.ListFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -222,7 +223,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + operation_id="ListFineTuningJobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -286,12 +287,12 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + ) -> models.CreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. - :param model: The name of the model to fine-tune. + :param model: :param hyperparameters: :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. 
If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. @@ -373,7 +374,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + operation_id="CreateFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -385,9 +386,7 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.CreateFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -439,12 +438,12 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + ) -> models.CreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. - :param model: The name of the model to fine-tune. + :param model: :param hyperparameters: :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. @@ -526,7 +525,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + operation_id="CreateFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -538,9 +537,7 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.CreateFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -558,7 +555,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + ) -> models.GetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
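# --- Illustrative usage sketch (not part of the generated diff) ---
# Fine-tuning job calls are unchanged apart from the response model renames
# (CreateFineTuningJobResponse, GetFineTuningJobResponse, CancelFineTuningJobResponse).
# The public namespace `client.fine_tuning.jobs` is assumed from the published SDK;
# the API key and job id are hypothetical placeholders.
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")

# get() now returns models.GetFineTuningJobResponse instead of the old
# JobsAPIRoutesFineTuningGetFineTuningJobResponse name.
job = client.fine_tuning.jobs.get(job_id="11111111-2222-3333-4444-555555555555")

# cancel() likewise returns models.CancelFineTuningJobResponse.
cancelled = client.fine_tuning.jobs.cancel(job_id=job.id)
# --- end of sketch ---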
@@ -579,7 +576,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + request = models.GetFineTuningJobRequest( job_id=job_id, ) @@ -612,7 +609,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + operation_id="GetFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -624,9 +621,7 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.GetFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -644,7 +639,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + ) -> models.GetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. @@ -665,7 +660,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + request = models.GetFineTuningJobRequest( job_id=job_id, ) @@ -698,7 +693,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + operation_id="GetFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -710,9 +705,7 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.GetFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -730,7 +723,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + ) -> models.CancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
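Note that the `operation_id` passed to `HookContext` is renamed in these hunks as well. That only matters for code using the SDK's hook system, but any hook that matches on the old snake_case operation IDs will silently stop firing. A sketch of the needed update, assuming a Speakeasy-style before-request hook (the hook signature here is an assumption based on the generated `_hooks` package; only the operation IDs come from this patch):

```python
import httpx


class LogFineTuningReads:
    # Assumed Speakeasy-style hook: invoked before each request with a
    # context object carrying the operation_id set in HookContext above.
    def before_request(self, hook_ctx, request: httpx.Request) -> httpx.Request:
        # Was: "jobs_api_routes_fine_tuning_get_fine_tuning_job"
        if hook_ctx.operation_id == "GetFineTuningJob":
            print(f"fetching fine-tuning job: {request.url}")
        return request
```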
@@ -751,7 +744,7 @@ def cancel( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + request = models.CancelFineTuningJobRequest( job_id=job_id, ) @@ -784,7 +777,7 @@ def cancel( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + operation_id="CancelFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -796,9 +789,7 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.CancelFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -816,7 +807,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + ) -> models.CancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. @@ -837,7 +828,7 @@ async def cancel_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + request = models.CancelFineTuningJobRequest( job_id=job_id, ) @@ -870,7 +861,7 @@ async def cancel_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + operation_id="CancelFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -882,9 +873,7 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.CancelFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -902,7 +891,7 @@ def start( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + ) -> models.StartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
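Because every rename in this file swaps one long, unambiguous identifier for another, a mechanical codemod is usually enough to migrate a downstream codebase. A sketch using the fine-tuning symbol pairs visible in this file's hunks (`codemod` is a hypothetical helper, not part of the SDK; the `TypedDict` variants follow the same pattern):

```python
# Old-to-new symbol pairs, taken directly from the hunks in this file.
FINE_TUNING_RENAMES = {
    "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": "ListFineTuningJobsRequest",
    "JobsAPIRoutesFineTuningGetFineTuningJobsStatus": "ListFineTuningJobsStatus",
    "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": "CreateFineTuningJobResponse",
    "JobsAPIRoutesFineTuningGetFineTuningJobRequest": "GetFineTuningJobRequest",
    "JobsAPIRoutesFineTuningGetFineTuningJobResponse": "GetFineTuningJobResponse",
    "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": "CancelFineTuningJobRequest",
    "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": "CancelFineTuningJobResponse",
    "JobsAPIRoutesFineTuningStartFineTuningJobRequest": "StartFineTuningJobRequest",
    "JobsAPIRoutesFineTuningStartFineTuningJobResponse": "StartFineTuningJobResponse",
}


def codemod(source: str) -> str:
    """Naive textual rewrite; safe here because the old names are long
    and cannot appear as substrings of anything else."""
    for old, new in FINE_TUNING_RENAMES.items():
        source = source.replace(old, new)
    return source
```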
@@ -923,7 +912,7 @@ def start( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + request = models.StartFineTuningJobRequest( job_id=job_id, ) @@ -956,7 +945,7 @@ def start( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + operation_id="StartFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -968,9 +957,7 @@ def start( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.StartFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -988,7 +975,7 @@ async def start_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + ) -> models.StartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. @@ -1009,7 +996,7 @@ async def start_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + request = models.StartFineTuningJobRequest( job_id=job_id, ) @@ -1042,7 +1029,7 @@ async def start_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + operation_id="StartFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1054,9 +1041,7 @@ async def start_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.StartFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) diff --git a/src/mistralai/client/httpclient.py b/src/mistralai/client/httpclient.py index 89560b56..544af7f8 100644 --- a/src/mistralai/client/httpclient.py +++ b/src/mistralai/client/httpclient.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3e46bde74327 # pyright: reportReturnType = false import asyncio diff --git a/src/mistralai/client/libraries.py b/src/mistralai/client/libraries.py index 03a54741..26ceabe1 100644 --- a/src/mistralai/client/libraries.py +++ b/src/mistralai/client/libraries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d43a5f78045f from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration @@ -86,7 +87,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_list_v1", + operation_id="ListLibraries", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -163,7 +164,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_list_v1", + operation_id="ListLibraries", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -256,7 +257,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_create_v1", + operation_id="CreateLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -355,7 +356,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_create_v1", + operation_id="CreateLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -412,7 +413,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesGetV1Request( + request = models.GetLibraryRequest( library_id=library_id, ) @@ -445,7 +446,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_get_v1", + operation_id="GetLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -502,7 +503,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesGetV1Request( + request = models.GetLibraryRequest( library_id=library_id, ) @@ -535,7 +536,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_get_v1", + operation_id="GetLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -592,7 +593,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDeleteV1Request( + request = models.DeleteLibraryRequest( library_id=library_id, ) @@ -625,7 +626,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_delete_v1", + operation_id="DeleteLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -682,7 +683,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDeleteV1Request( + request = models.DeleteLibraryRequest( library_id=library_id, ) @@ -715,7 +716,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_delete_v1", + operation_id="DeleteLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -776,7 +777,7 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesUpdateV1Request( + request = models.UpdateLibraryRequest( library_id=library_id, library_in_update=models.LibraryInUpdate( name=name, @@ -816,7 +817,7 @@ def update( hook_ctx=HookContext( 
config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_update_v1", + operation_id="UpdateLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -877,7 +878,7 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesUpdateV1Request( + request = models.UpdateLibraryRequest( library_id=library_id, library_in_update=models.LibraryInUpdate( name=name, @@ -917,7 +918,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_update_v1", + operation_id="UpdateLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py index 046037c5..093ffcbd 100644 --- a/src/mistralai/client/models/__init__.py +++ b/src/mistralai/client/models/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e0e8dad92725 from .mistralerror import MistralError from typing import TYPE_CHECKING @@ -36,84 +37,6 @@ AgentHandoffStartedEvent, AgentHandoffStartedEventTypedDict, ) - from .agents_api_v1_agents_create_or_update_aliasop import ( - AgentsAPIV1AgentsCreateOrUpdateAliasRequest, - AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, - ) - from .agents_api_v1_agents_deleteop import ( - AgentsAPIV1AgentsDeleteRequest, - AgentsAPIV1AgentsDeleteRequestTypedDict, - ) - from .agents_api_v1_agents_get_versionop import ( - AgentsAPIV1AgentsGetVersionRequest, - AgentsAPIV1AgentsGetVersionRequestTypedDict, - ) - from .agents_api_v1_agents_getop import ( - AgentsAPIV1AgentsGetAgentVersion, - AgentsAPIV1AgentsGetAgentVersionTypedDict, - AgentsAPIV1AgentsGetRequest, - AgentsAPIV1AgentsGetRequestTypedDict, - ) - from .agents_api_v1_agents_list_version_aliasesop import ( - AgentsAPIV1AgentsListVersionAliasesRequest, - AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, - ) - from .agents_api_v1_agents_list_versionsop import ( - AgentsAPIV1AgentsListVersionsRequest, - AgentsAPIV1AgentsListVersionsRequestTypedDict, - ) - from .agents_api_v1_agents_listop import ( - AgentsAPIV1AgentsListRequest, - AgentsAPIV1AgentsListRequestTypedDict, - ) - from .agents_api_v1_agents_update_versionop import ( - AgentsAPIV1AgentsUpdateVersionRequest, - AgentsAPIV1AgentsUpdateVersionRequestTypedDict, - ) - from .agents_api_v1_agents_updateop import ( - AgentsAPIV1AgentsUpdateRequest, - AgentsAPIV1AgentsUpdateRequestTypedDict, - ) - from .agents_api_v1_conversations_append_streamop import ( - AgentsAPIV1ConversationsAppendStreamRequest, - AgentsAPIV1ConversationsAppendStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_appendop import ( - AgentsAPIV1ConversationsAppendRequest, - AgentsAPIV1ConversationsAppendRequestTypedDict, - ) - from .agents_api_v1_conversations_deleteop import ( - AgentsAPIV1ConversationsDeleteRequest, - AgentsAPIV1ConversationsDeleteRequestTypedDict, - ) - from .agents_api_v1_conversations_getop import ( - AgentsAPIV1ConversationsGetRequest, - AgentsAPIV1ConversationsGetRequestTypedDict, - ResponseV1ConversationsGet, - ResponseV1ConversationsGetTypedDict, - ) - from .agents_api_v1_conversations_historyop import ( - AgentsAPIV1ConversationsHistoryRequest, - AgentsAPIV1ConversationsHistoryRequestTypedDict, - ) - from .agents_api_v1_conversations_listop 
import ( - AgentsAPIV1ConversationsListRequest, - AgentsAPIV1ConversationsListRequestTypedDict, - AgentsAPIV1ConversationsListResponse, - AgentsAPIV1ConversationsListResponseTypedDict, - ) - from .agents_api_v1_conversations_messagesop import ( - AgentsAPIV1ConversationsMessagesRequest, - AgentsAPIV1ConversationsMessagesRequestTypedDict, - ) - from .agents_api_v1_conversations_restart_streamop import ( - AgentsAPIV1ConversationsRestartStreamRequest, - AgentsAPIV1ConversationsRestartStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_restartop import ( - AgentsAPIV1ConversationsRestartRequest, - AgentsAPIV1ConversationsRestartRequestTypedDict, - ) from .agentscompletionrequest import ( AgentsCompletionRequest, AgentsCompletionRequestMessage, @@ -141,11 +64,16 @@ AgentUpdateRequestTypedDict, ) from .apiendpoint import APIEndpoint - from .archiveftmodelout import ( - ArchiveFTModelOut, - ArchiveFTModelOutObject, - ArchiveFTModelOutTypedDict, + from .appendconversationop import ( + AppendConversationRequest, + AppendConversationRequestTypedDict, ) + from .appendconversationstreamop import ( + AppendConversationStreamRequest, + AppendConversationStreamRequestTypedDict, + ) + from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict + from .archivemodelop import ArchiveModelRequest, ArchiveModelRequestTypedDict from .assistantmessage import ( AssistantMessage, AssistantMessageContent, @@ -167,11 +95,18 @@ from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, BatchJobInTypedDict - from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict - from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict + from .batchjobout import BatchJobOut, BatchJobOutTypedDict + from .batchjobsout import BatchJobsOut, BatchJobsOutTypedDict from .batchjobstatus import BatchJobStatus from .batchrequest import BatchRequest, BatchRequestTypedDict from .builtinconnectors import BuiltInConnectors + from .cancelbatchjobop import CancelBatchJobRequest, CancelBatchJobRequestTypedDict + from .cancelfinetuningjobop import ( + CancelFineTuningJobRequest, + CancelFineTuningJobRequestTypedDict, + CancelFineTuningJobResponse, + CancelFineTuningJobResponseTypedDict, + ) from .chatclassificationrequest import ( ChatClassificationRequest, ChatClassificationRequestTypedDict, @@ -234,20 +169,17 @@ ClassifierDetailedJobOut, ClassifierDetailedJobOutIntegration, ClassifierDetailedJobOutIntegrationTypedDict, - ClassifierDetailedJobOutObject, ClassifierDetailedJobOutStatus, ClassifierDetailedJobOutTypedDict, ) from .classifierftmodelout import ( ClassifierFTModelOut, - ClassifierFTModelOutObject, ClassifierFTModelOutTypedDict, ) from .classifierjobout import ( ClassifierJobOut, ClassifierJobOutIntegration, ClassifierJobOutIntegrationTypedDict, - ClassifierJobOutObject, ClassifierJobOutStatus, ClassifierJobOutTypedDict, ) @@ -269,7 +201,6 @@ CompletionDetailedJobOut, CompletionDetailedJobOutIntegration, CompletionDetailedJobOutIntegrationTypedDict, - CompletionDetailedJobOutObject, CompletionDetailedJobOutRepository, CompletionDetailedJobOutRepositoryTypedDict, CompletionDetailedJobOutStatus, @@ -278,14 +209,12 @@ from .completionevent import CompletionEvent, CompletionEventTypedDict from .completionftmodelout import ( CompletionFTModelOut, - CompletionFTModelOutObject, CompletionFTModelOutTypedDict, ) from .completionjobout import ( CompletionJobOut, 
CompletionJobOutIntegration, CompletionJobOutIntegrationTypedDict, - CompletionJobOutObject, CompletionJobOutRepository, CompletionJobOutRepositoryTypedDict, CompletionJobOutStatus, @@ -377,11 +306,34 @@ ConversationUsageInfo, ConversationUsageInfoTypedDict, ) - from .delete_model_v1_models_model_id_deleteop import ( - DeleteModelV1ModelsModelIDDeleteRequest, - DeleteModelV1ModelsModelIDDeleteRequestTypedDict, + from .createfinetuningjobop import ( + CreateFineTuningJobResponse, + CreateFineTuningJobResponseTypedDict, + Response, + ResponseTypedDict, + ) + from .createorupdateagentaliasop import ( + CreateOrUpdateAgentAliasRequest, + CreateOrUpdateAgentAliasRequestTypedDict, ) + from .deleteagentaliasop import ( + DeleteAgentAliasRequest, + DeleteAgentAliasRequestTypedDict, + ) + from .deleteagentop import DeleteAgentRequest, DeleteAgentRequestTypedDict + from .deleteconversationop import ( + DeleteConversationRequest, + DeleteConversationRequestTypedDict, + ) + from .deletedocumentop import DeleteDocumentRequest, DeleteDocumentRequestTypedDict + from .deletefileop import DeleteFileRequest, DeleteFileRequestTypedDict from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict + from .deletelibraryaccessop import ( + DeleteLibraryAccessRequest, + DeleteLibraryAccessRequestTypedDict, + ) + from .deletelibraryop import DeleteLibraryRequest, DeleteLibraryRequestTypedDict + from .deletemodelop import DeleteModelRequest, DeleteModelRequestTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import ( DeltaMessage, @@ -403,6 +355,7 @@ DocumentURLChunkType, DocumentURLChunkTypedDict, ) + from .downloadfileop import DownloadFileRequest, DownloadFileRequestTypedDict from .embeddingdtype import EmbeddingDtype from .embeddingrequest import ( EmbeddingRequest, @@ -421,30 +374,6 @@ from .file import File, FileTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose - from .files_api_routes_delete_fileop import ( - FilesAPIRoutesDeleteFileRequest, - FilesAPIRoutesDeleteFileRequestTypedDict, - ) - from .files_api_routes_download_fileop import ( - FilesAPIRoutesDownloadFileRequest, - FilesAPIRoutesDownloadFileRequestTypedDict, - ) - from .files_api_routes_get_signed_urlop import ( - FilesAPIRoutesGetSignedURLRequest, - FilesAPIRoutesGetSignedURLRequestTypedDict, - ) - from .files_api_routes_list_filesop import ( - FilesAPIRoutesListFilesRequest, - FilesAPIRoutesListFilesRequestTypedDict, - ) - from .files_api_routes_retrieve_fileop import ( - FilesAPIRoutesRetrieveFileRequest, - FilesAPIRoutesRetrieveFileRequestTypedDict, - ) - from .files_api_routes_upload_fileop import ( - MultiPartBodyParams, - MultiPartBodyParamsTypedDict, - ) from .fileschema import FileSchema, FileSchemaTypedDict from .filesignedurl import FileSignedURL, FileSignedURLTypedDict from .fimcompletionrequest import ( @@ -496,6 +425,59 @@ FunctionResultEntryTypedDict, ) from .functiontool import FunctionTool, FunctionToolTypedDict + from .getagentop import ( + GetAgentAgentVersion, + GetAgentAgentVersionTypedDict, + GetAgentRequest, + GetAgentRequestTypedDict, + ) + from .getagentversionop import ( + GetAgentVersionRequest, + GetAgentVersionRequestTypedDict, + ) + from .getbatchjobop import GetBatchJobRequest, GetBatchJobRequestTypedDict + from .getconversationhistoryop import ( + GetConversationHistoryRequest, + GetConversationHistoryRequestTypedDict, + ) + from .getconversationmessagesop import ( + GetConversationMessagesRequest, + 
GetConversationMessagesRequestTypedDict, + ) + from .getconversationop import ( + GetConversationRequest, + GetConversationRequestTypedDict, + ResponseV1ConversationsGet, + ResponseV1ConversationsGetTypedDict, + ) + from .getdocumentextractedtextsignedurlop import ( + GetDocumentExtractedTextSignedURLRequest, + GetDocumentExtractedTextSignedURLRequestTypedDict, + ) + from .getdocumentop import GetDocumentRequest, GetDocumentRequestTypedDict + from .getdocumentsignedurlop import ( + GetDocumentSignedURLRequest, + GetDocumentSignedURLRequestTypedDict, + ) + from .getdocumentstatusop import ( + GetDocumentStatusRequest, + GetDocumentStatusRequestTypedDict, + ) + from .getdocumenttextcontentop import ( + GetDocumentTextContentRequest, + GetDocumentTextContentRequestTypedDict, + ) + from .getfilesignedurlop import ( + GetFileSignedURLRequest, + GetFileSignedURLRequestTypedDict, + ) + from .getfinetuningjobop import ( + GetFineTuningJobRequest, + GetFineTuningJobRequestTypedDict, + GetFineTuningJobResponse, + GetFineTuningJobResponseTypedDict, + ) + from .getlibraryop import GetLibraryRequest, GetLibraryRequestTypedDict from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData @@ -534,145 +516,48 @@ JobInTypedDict, ) from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict - from .jobs_api_routes_batch_cancel_batch_jobop import ( - JobsAPIRoutesBatchCancelBatchJobRequest, - JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, - ) - from .jobs_api_routes_batch_get_batch_jobop import ( - JobsAPIRoutesBatchGetBatchJobRequest, - JobsAPIRoutesBatchGetBatchJobRequestTypedDict, - ) - from .jobs_api_routes_batch_get_batch_jobsop import ( - JobsAPIRoutesBatchGetBatchJobsRequest, - JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCancelFineTuningJobRequest, - JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningCancelFineTuningJobResponse, - JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCreateFineTuningJobResponse, - JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, - Response, - ResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningGetFineTuningJobRequest, - JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningGetFineTuningJobResponse, - JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( - JobsAPIRoutesFineTuningGetFineTuningJobsRequest, - JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, - JobsAPIRoutesFineTuningGetFineTuningJobsStatus, - ) - from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningStartFineTuningJobRequest, - JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningStartFineTuningJobResponse, - JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop 
import ( - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, - JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, - ) - from .jobsout import ( - JobsOut, - JobsOutData, - JobsOutDataTypedDict, - JobsOutObject, - JobsOutTypedDict, - ) + from .jobsout import JobsOut, JobsOutData, JobsOutDataTypedDict, JobsOutTypedDict from .jsonschema import JSONSchema, JSONSchemaTypedDict from .legacyjobmetadataout import ( LegacyJobMetadataOut, - LegacyJobMetadataOutObject, LegacyJobMetadataOutTypedDict, ) - from .libraries_delete_v1op import ( - LibrariesDeleteV1Request, - LibrariesDeleteV1RequestTypedDict, - ) - from .libraries_documents_delete_v1op import ( - LibrariesDocumentsDeleteV1Request, - LibrariesDocumentsDeleteV1RequestTypedDict, - ) - from .libraries_documents_get_extracted_text_signed_url_v1op import ( - LibrariesDocumentsGetExtractedTextSignedURLV1Request, - LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, - ) - from .libraries_documents_get_signed_url_v1op import ( - LibrariesDocumentsGetSignedURLV1Request, - LibrariesDocumentsGetSignedURLV1RequestTypedDict, - ) - from .libraries_documents_get_status_v1op import ( - LibrariesDocumentsGetStatusV1Request, - LibrariesDocumentsGetStatusV1RequestTypedDict, - ) - from .libraries_documents_get_text_content_v1op import ( - LibrariesDocumentsGetTextContentV1Request, - LibrariesDocumentsGetTextContentV1RequestTypedDict, - ) - from .libraries_documents_get_v1op import ( - LibrariesDocumentsGetV1Request, - LibrariesDocumentsGetV1RequestTypedDict, - ) - from .libraries_documents_list_v1op import ( - LibrariesDocumentsListV1Request, - LibrariesDocumentsListV1RequestTypedDict, - ) - from .libraries_documents_reprocess_v1op import ( - LibrariesDocumentsReprocessV1Request, - LibrariesDocumentsReprocessV1RequestTypedDict, - ) - from .libraries_documents_update_v1op import ( - LibrariesDocumentsUpdateV1Request, - LibrariesDocumentsUpdateV1RequestTypedDict, - ) - from .libraries_documents_upload_v1op import ( - DocumentUpload, - DocumentUploadTypedDict, - LibrariesDocumentsUploadV1Request, - LibrariesDocumentsUploadV1RequestTypedDict, - ) - from .libraries_get_v1op import ( - LibrariesGetV1Request, - LibrariesGetV1RequestTypedDict, - ) - from .libraries_share_create_v1op import ( - LibrariesShareCreateV1Request, - LibrariesShareCreateV1RequestTypedDict, - ) - from .libraries_share_delete_v1op import ( - LibrariesShareDeleteV1Request, - LibrariesShareDeleteV1RequestTypedDict, - ) - from .libraries_share_list_v1op import ( - LibrariesShareListV1Request, - LibrariesShareListV1RequestTypedDict, - ) - from .libraries_update_v1op import ( - LibrariesUpdateV1Request, - LibrariesUpdateV1RequestTypedDict, - ) from .libraryin import LibraryIn, LibraryInTypedDict from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict from .libraryout import LibraryOut, LibraryOutTypedDict + from .listagentaliasesop import ( + ListAgentAliasesRequest, + ListAgentAliasesRequestTypedDict, + ) + from .listagentsop import ListAgentsRequest, ListAgentsRequestTypedDict + from .listagentversionsop import ( + ListAgentVersionsRequest, + ListAgentVersionsRequestTypedDict, + ) + from .listbatchjobsop import ( + ListBatchJobsRequest, + 
ListBatchJobsRequestTypedDict, + OrderBy, + ) + from .listconversationsop import ( + ListConversationsRequest, + ListConversationsRequestTypedDict, + ListConversationsResponse, + ListConversationsResponseTypedDict, + ) from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict + from .listdocumentsop import ListDocumentsRequest, ListDocumentsRequestTypedDict + from .listfilesop import ListFilesRequest, ListFilesRequestTypedDict from .listfilesout import ListFilesOut, ListFilesOutTypedDict + from .listfinetuningjobsop import ( + ListFineTuningJobsRequest, + ListFineTuningJobsRequestTypedDict, + ListFineTuningJobsStatus, + ) + from .listlibraryaccessesop import ( + ListLibraryAccessesRequest, + ListLibraryAccessesRequestTypedDict, + ) from .listlibraryout import ListLibraryOut, ListLibraryOutTypedDict from .listsharingout import ListSharingOut, ListSharingOutTypedDict from .messageentries import MessageEntries, MessageEntriesTypedDict @@ -772,6 +657,10 @@ ReferenceChunkType, ReferenceChunkTypedDict, ) + from .reprocessdocumentop import ( + ReprocessDocumentRequest, + ReprocessDocumentRequestTypedDict, + ) from .requestsource import RequestSource from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict @@ -782,13 +671,22 @@ ResponseStartedEventTypedDict, ) from .responsevalidationerror import ResponseValidationError - from .retrieve_model_v1_models_model_id_getop import ( + from .restartconversationop import ( + RestartConversationRequest, + RestartConversationRequestTypedDict, + ) + from .restartconversationstreamop import ( + RestartConversationStreamRequest, + RestartConversationStreamRequestTypedDict, + ) + from .retrievefileop import RetrieveFileRequest, RetrieveFileRequestTypedDict + from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict + from .retrievemodelop import ( ResponseRetrieveModelV1ModelsModelIDGet, ResponseRetrieveModelV1ModelsModelIDGetTypedDict, - RetrieveModelV1ModelsModelIDGetRequest, - RetrieveModelV1ModelsModelIDGetRequestTypedDict, + RetrieveModelRequest, + RetrieveModelRequestTypedDict, ) - from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict from .sampletype import SampleType from .sdkerror import SDKError from .security import Security, SecurityTypedDict @@ -798,6 +696,12 @@ from .sharingout import SharingOut, SharingOutTypedDict from .source import Source from .ssetypes import SSETypes + from .startfinetuningjobop import ( + StartFineTuningJobRequest, + StartFineTuningJobRequestTypedDict, + StartFineTuningJobResponse, + StartFineTuningJobResponseTypedDict, + ) from .systemmessage import ( SystemMessage, SystemMessageContent, @@ -901,12 +805,33 @@ TranscriptionStreamTextDelta, TranscriptionStreamTextDeltaTypedDict, ) - from .unarchiveftmodelout import ( - UnarchiveFTModelOut, - UnarchiveFTModelOutObject, - UnarchiveFTModelOutTypedDict, + from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutTypedDict + from .unarchivemodelop import UnarchiveModelRequest, UnarchiveModelRequestTypedDict + from .updateagentop import UpdateAgentRequest, UpdateAgentRequestTypedDict + from .updateagentversionop import ( + UpdateAgentVersionRequest, + UpdateAgentVersionRequestTypedDict, ) + from .updatedocumentop import UpdateDocumentRequest, UpdateDocumentRequestTypedDict from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict + from .updatelibraryop import UpdateLibraryRequest, UpdateLibraryRequestTypedDict + 
from .updatemodelop import ( + UpdateModelRequest, + UpdateModelRequestTypedDict, + UpdateModelResponse, + UpdateModelResponseTypedDict, + ) + from .updateorcreatelibraryaccessop import ( + UpdateOrCreateLibraryAccessRequest, + UpdateOrCreateLibraryAccessRequestTypedDict, + ) + from .uploaddocumentop import ( + DocumentUpload, + DocumentUploadTypedDict, + UploadDocumentRequest, + UploadDocumentRequestTypedDict, + ) + from .uploadfileop import MultiPartBodyParams, MultiPartBodyParamsTypedDict from .uploadfileout import UploadFileOut, UploadFileOutTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import ( @@ -959,46 +884,6 @@ "AgentUpdateRequestTool", "AgentUpdateRequestToolTypedDict", "AgentUpdateRequestTypedDict", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", - "AgentsAPIV1AgentsDeleteRequest", - "AgentsAPIV1AgentsDeleteRequestTypedDict", - "AgentsAPIV1AgentsGetAgentVersion", - "AgentsAPIV1AgentsGetAgentVersionTypedDict", - "AgentsAPIV1AgentsGetRequest", - "AgentsAPIV1AgentsGetRequestTypedDict", - "AgentsAPIV1AgentsGetVersionRequest", - "AgentsAPIV1AgentsGetVersionRequestTypedDict", - "AgentsAPIV1AgentsListRequest", - "AgentsAPIV1AgentsListRequestTypedDict", - "AgentsAPIV1AgentsListVersionAliasesRequest", - "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", - "AgentsAPIV1AgentsListVersionsRequest", - "AgentsAPIV1AgentsListVersionsRequestTypedDict", - "AgentsAPIV1AgentsUpdateRequest", - "AgentsAPIV1AgentsUpdateRequestTypedDict", - "AgentsAPIV1AgentsUpdateVersionRequest", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", - "AgentsAPIV1ConversationsAppendRequest", - "AgentsAPIV1ConversationsAppendRequestTypedDict", - "AgentsAPIV1ConversationsAppendStreamRequest", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", - "AgentsAPIV1ConversationsDeleteRequest", - "AgentsAPIV1ConversationsDeleteRequestTypedDict", - "AgentsAPIV1ConversationsGetRequest", - "AgentsAPIV1ConversationsGetRequestTypedDict", - "AgentsAPIV1ConversationsHistoryRequest", - "AgentsAPIV1ConversationsHistoryRequestTypedDict", - "AgentsAPIV1ConversationsListRequest", - "AgentsAPIV1ConversationsListRequestTypedDict", - "AgentsAPIV1ConversationsListResponse", - "AgentsAPIV1ConversationsListResponseTypedDict", - "AgentsAPIV1ConversationsMessagesRequest", - "AgentsAPIV1ConversationsMessagesRequestTypedDict", - "AgentsAPIV1ConversationsRestartRequest", - "AgentsAPIV1ConversationsRestartRequestTypedDict", - "AgentsAPIV1ConversationsRestartStreamRequest", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", "AgentsCompletionRequest", "AgentsCompletionRequestMessage", "AgentsCompletionRequestMessageTypedDict", @@ -1015,9 +900,14 @@ "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestToolChoiceTypedDict", "AgentsCompletionStreamRequestTypedDict", + "AppendConversationRequest", + "AppendConversationRequestTypedDict", + "AppendConversationStreamRequest", + "AppendConversationStreamRequestTypedDict", "ArchiveFTModelOut", - "ArchiveFTModelOutObject", "ArchiveFTModelOutTypedDict", + "ArchiveModelRequest", + "ArchiveModelRequestTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", @@ -1043,15 +933,19 @@ "BatchJobIn", "BatchJobInTypedDict", "BatchJobOut", - "BatchJobOutObject", "BatchJobOutTypedDict", "BatchJobStatus", "BatchJobsOut", - "BatchJobsOutObject", "BatchJobsOutTypedDict", "BatchRequest", "BatchRequestTypedDict", "BuiltInConnectors", + "CancelBatchJobRequest", + 
"CancelBatchJobRequestTypedDict", + "CancelFineTuningJobRequest", + "CancelFineTuningJobRequestTypedDict", + "CancelFineTuningJobResponse", + "CancelFineTuningJobResponseTypedDict", "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", @@ -1096,16 +990,13 @@ "ClassifierDetailedJobOut", "ClassifierDetailedJobOutIntegration", "ClassifierDetailedJobOutIntegrationTypedDict", - "ClassifierDetailedJobOutObject", "ClassifierDetailedJobOutStatus", "ClassifierDetailedJobOutTypedDict", "ClassifierFTModelOut", - "ClassifierFTModelOutObject", "ClassifierFTModelOutTypedDict", "ClassifierJobOut", "ClassifierJobOutIntegration", "ClassifierJobOutIntegrationTypedDict", - "ClassifierJobOutObject", "ClassifierJobOutStatus", "ClassifierJobOutTypedDict", "ClassifierTargetIn", @@ -1127,7 +1018,6 @@ "CompletionDetailedJobOut", "CompletionDetailedJobOutIntegration", "CompletionDetailedJobOutIntegrationTypedDict", - "CompletionDetailedJobOutObject", "CompletionDetailedJobOutRepository", "CompletionDetailedJobOutRepositoryTypedDict", "CompletionDetailedJobOutStatus", @@ -1135,12 +1025,10 @@ "CompletionEvent", "CompletionEventTypedDict", "CompletionFTModelOut", - "CompletionFTModelOutObject", "CompletionFTModelOutTypedDict", "CompletionJobOut", "CompletionJobOutIntegration", "CompletionJobOutIntegrationTypedDict", - "CompletionJobOutObject", "CompletionJobOutRepository", "CompletionJobOutRepositoryTypedDict", "CompletionJobOutStatus", @@ -1201,12 +1089,30 @@ "ConversationStreamRequestTypedDict", "ConversationUsageInfo", "ConversationUsageInfoTypedDict", + "CreateFineTuningJobResponse", + "CreateFineTuningJobResponseTypedDict", + "CreateOrUpdateAgentAliasRequest", + "CreateOrUpdateAgentAliasRequestTypedDict", + "DeleteAgentAliasRequest", + "DeleteAgentAliasRequestTypedDict", + "DeleteAgentRequest", + "DeleteAgentRequestTypedDict", + "DeleteConversationRequest", + "DeleteConversationRequestTypedDict", + "DeleteDocumentRequest", + "DeleteDocumentRequestTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", + "DeleteFileRequest", + "DeleteFileRequestTypedDict", + "DeleteLibraryAccessRequest", + "DeleteLibraryAccessRequestTypedDict", + "DeleteLibraryRequest", + "DeleteLibraryRequestTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", - "DeleteModelV1ModelsModelIDDeleteRequest", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", + "DeleteModelRequest", + "DeleteModelRequestTypedDict", "DeltaMessage", "DeltaMessageContent", "DeltaMessageContentTypedDict", @@ -1226,6 +1132,8 @@ "DocumentUpdateInTypedDict", "DocumentUpload", "DocumentUploadTypedDict", + "DownloadFileRequest", + "DownloadFileRequestTypedDict", "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", @@ -1265,16 +1173,6 @@ "FileSignedURL", "FileSignedURLTypedDict", "FileTypedDict", - "FilesAPIRoutesDeleteFileRequest", - "FilesAPIRoutesDeleteFileRequestTypedDict", - "FilesAPIRoutesDownloadFileRequest", - "FilesAPIRoutesDownloadFileRequestTypedDict", - "FilesAPIRoutesGetSignedURLRequest", - "FilesAPIRoutesGetSignedURLRequestTypedDict", - "FilesAPIRoutesListFilesRequest", - "FilesAPIRoutesListFilesRequestTypedDict", - "FilesAPIRoutesRetrieveFileRequest", - "FilesAPIRoutesRetrieveFileRequestTypedDict", "FineTuneableModelType", "Format", "Function", @@ -1297,6 +1195,38 @@ "FunctionTool", "FunctionToolTypedDict", "FunctionTypedDict", + "GetAgentAgentVersion", + "GetAgentAgentVersionTypedDict", + "GetAgentRequest", + "GetAgentRequestTypedDict", + "GetAgentVersionRequest", + "GetAgentVersionRequestTypedDict", + 
"GetBatchJobRequest", + "GetBatchJobRequestTypedDict", + "GetConversationHistoryRequest", + "GetConversationHistoryRequestTypedDict", + "GetConversationMessagesRequest", + "GetConversationMessagesRequestTypedDict", + "GetConversationRequest", + "GetConversationRequestTypedDict", + "GetDocumentExtractedTextSignedURLRequest", + "GetDocumentExtractedTextSignedURLRequestTypedDict", + "GetDocumentRequest", + "GetDocumentRequestTypedDict", + "GetDocumentSignedURLRequest", + "GetDocumentSignedURLRequestTypedDict", + "GetDocumentStatusRequest", + "GetDocumentStatusRequestTypedDict", + "GetDocumentTextContentRequest", + "GetDocumentTextContentRequestTypedDict", + "GetFileSignedURLRequest", + "GetFileSignedURLRequestTypedDict", + "GetFineTuningJobRequest", + "GetFineTuningJobRequestTypedDict", + "GetFineTuningJobResponse", + "GetFineTuningJobResponseTypedDict", + "GetLibraryRequest", + "GetLibraryRequestTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", @@ -1336,87 +1266,43 @@ "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", - "JobsAPIRoutesBatchCancelBatchJobRequest", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobRequest", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobsRequest", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobsStatus", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", "JobsOut", "JobsOutData", "JobsOutDataTypedDict", - "JobsOutObject", "JobsOutTypedDict", "LegacyJobMetadataOut", - "LegacyJobMetadataOutObject", "LegacyJobMetadataOutTypedDict", - "LibrariesDeleteV1Request", - "LibrariesDeleteV1RequestTypedDict", - "LibrariesDocumentsDeleteV1Request", - "LibrariesDocumentsDeleteV1RequestTypedDict", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetSignedURLV1Request", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetStatusV1Request", - "LibrariesDocumentsGetStatusV1RequestTypedDict", - "LibrariesDocumentsGetTextContentV1Request", - 
"LibrariesDocumentsGetTextContentV1RequestTypedDict", - "LibrariesDocumentsGetV1Request", - "LibrariesDocumentsGetV1RequestTypedDict", - "LibrariesDocumentsListV1Request", - "LibrariesDocumentsListV1RequestTypedDict", - "LibrariesDocumentsReprocessV1Request", - "LibrariesDocumentsReprocessV1RequestTypedDict", - "LibrariesDocumentsUpdateV1Request", - "LibrariesDocumentsUpdateV1RequestTypedDict", - "LibrariesDocumentsUploadV1Request", - "LibrariesDocumentsUploadV1RequestTypedDict", - "LibrariesGetV1Request", - "LibrariesGetV1RequestTypedDict", - "LibrariesShareCreateV1Request", - "LibrariesShareCreateV1RequestTypedDict", - "LibrariesShareDeleteV1Request", - "LibrariesShareDeleteV1RequestTypedDict", - "LibrariesShareListV1Request", - "LibrariesShareListV1RequestTypedDict", - "LibrariesUpdateV1Request", - "LibrariesUpdateV1RequestTypedDict", "LibraryIn", "LibraryInTypedDict", "LibraryInUpdate", "LibraryInUpdateTypedDict", "LibraryOut", "LibraryOutTypedDict", + "ListAgentAliasesRequest", + "ListAgentAliasesRequestTypedDict", + "ListAgentVersionsRequest", + "ListAgentVersionsRequestTypedDict", + "ListAgentsRequest", + "ListAgentsRequestTypedDict", + "ListBatchJobsRequest", + "ListBatchJobsRequestTypedDict", + "ListConversationsRequest", + "ListConversationsRequestTypedDict", + "ListConversationsResponse", + "ListConversationsResponseTypedDict", "ListDocumentOut", "ListDocumentOutTypedDict", + "ListDocumentsRequest", + "ListDocumentsRequestTypedDict", "ListFilesOut", "ListFilesOutTypedDict", + "ListFilesRequest", + "ListFilesRequestTypedDict", + "ListFineTuningJobsRequest", + "ListFineTuningJobsRequestTypedDict", + "ListFineTuningJobsStatus", + "ListLibraryAccessesRequest", + "ListLibraryAccessesRequestTypedDict", "ListLibraryOut", "ListLibraryOutTypedDict", "ListSharingOut", @@ -1484,6 +1370,7 @@ "OCRTableObjectTypedDict", "OCRUsageInfo", "OCRUsageInfoTypedDict", + "OrderBy", "Output", "OutputContentChunks", "OutputContentChunksTypedDict", @@ -1509,6 +1396,8 @@ "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", + "ReprocessDocumentRequest", + "ReprocessDocumentRequestTypedDict", "RequestSource", "Response", "ResponseDoneEvent", @@ -1526,10 +1415,16 @@ "ResponseV1ConversationsGet", "ResponseV1ConversationsGetTypedDict", "ResponseValidationError", + "RestartConversationRequest", + "RestartConversationRequestTypedDict", + "RestartConversationStreamRequest", + "RestartConversationStreamRequestTypedDict", "RetrieveFileOut", "RetrieveFileOutTypedDict", - "RetrieveModelV1ModelsModelIDGetRequest", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "RetrieveFileRequest", + "RetrieveFileRequestTypedDict", + "RetrieveModelRequest", + "RetrieveModelRequestTypedDict", "SDKError", "SSETypes", "SampleType", @@ -1543,6 +1438,10 @@ "SharingOut", "SharingOutTypedDict", "Source", + "StartFineTuningJobRequest", + "StartFineTuningJobRequestTypedDict", + "StartFineTuningJobResponse", + "StartFineTuningJobResponseTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -1620,10 +1519,27 @@ "TranscriptionStreamTextDelta", "TranscriptionStreamTextDeltaTypedDict", "UnarchiveFTModelOut", - "UnarchiveFTModelOutObject", "UnarchiveFTModelOutTypedDict", + "UnarchiveModelRequest", + "UnarchiveModelRequestTypedDict", + "UpdateAgentRequest", + "UpdateAgentRequestTypedDict", + "UpdateAgentVersionRequest", + "UpdateAgentVersionRequestTypedDict", + "UpdateDocumentRequest", + "UpdateDocumentRequestTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", + "UpdateLibraryRequest", + 
"UpdateLibraryRequestTypedDict", + "UpdateModelRequest", + "UpdateModelRequestTypedDict", + "UpdateModelResponse", + "UpdateModelResponseTypedDict", + "UpdateOrCreateLibraryAccessRequest", + "UpdateOrCreateLibraryAccessRequestTypedDict", + "UploadDocumentRequest", + "UploadDocumentRequestTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", @@ -1669,48 +1585,6 @@ "AgentHandoffEntryTypedDict": ".agenthandoffentry", "AgentHandoffStartedEvent": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", - "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", - "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", - "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", - "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", - "AgentsAPIV1AgentsGetAgentVersion": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetAgentVersionTypedDict": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", - "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", - "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", - "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", - "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", - "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", - "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", - "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", - "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", - "ResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", - "ResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsListResponse": ".agents_api_v1_conversations_listop", - 
"AgentsAPIV1ConversationsListResponseTypedDict": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", - "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", "AgentsCompletionRequest": ".agentscompletionrequest", "AgentsCompletionRequestMessage": ".agentscompletionrequest", "AgentsCompletionRequestMessageTypedDict": ".agentscompletionrequest", @@ -1732,9 +1606,14 @@ "AgentUpdateRequestToolTypedDict": ".agentupdaterequest", "AgentUpdateRequestTypedDict": ".agentupdaterequest", "APIEndpoint": ".apiendpoint", + "AppendConversationRequest": ".appendconversationop", + "AppendConversationRequestTypedDict": ".appendconversationop", + "AppendConversationStreamRequest": ".appendconversationstreamop", + "AppendConversationStreamRequestTypedDict": ".appendconversationstreamop", "ArchiveFTModelOut": ".archiveftmodelout", - "ArchiveFTModelOutObject": ".archiveftmodelout", "ArchiveFTModelOutTypedDict": ".archiveftmodelout", + "ArchiveModelRequest": ".archivemodelop", + "ArchiveModelRequestTypedDict": ".archivemodelop", "AssistantMessage": ".assistantmessage", "AssistantMessageContent": ".assistantmessage", "AssistantMessageContentTypedDict": ".assistantmessage", @@ -1756,15 +1635,19 @@ "BatchJobIn": ".batchjobin", "BatchJobInTypedDict": ".batchjobin", "BatchJobOut": ".batchjobout", - "BatchJobOutObject": ".batchjobout", "BatchJobOutTypedDict": ".batchjobout", "BatchJobsOut": ".batchjobsout", - "BatchJobsOutObject": ".batchjobsout", "BatchJobsOutTypedDict": ".batchjobsout", "BatchJobStatus": ".batchjobstatus", "BatchRequest": ".batchrequest", "BatchRequestTypedDict": ".batchrequest", "BuiltInConnectors": ".builtinconnectors", + "CancelBatchJobRequest": ".cancelbatchjobop", + "CancelBatchJobRequestTypedDict": ".cancelbatchjobop", + "CancelFineTuningJobRequest": ".cancelfinetuningjobop", + "CancelFineTuningJobRequestTypedDict": ".cancelfinetuningjobop", + "CancelFineTuningJobResponse": ".cancelfinetuningjobop", + "CancelFineTuningJobResponseTypedDict": ".cancelfinetuningjobop", "ChatClassificationRequest": ".chatclassificationrequest", "ChatClassificationRequestTypedDict": ".chatclassificationrequest", "ChatCompletionChoice": ".chatcompletionchoice", @@ -1809,16 +1692,13 @@ "ClassifierDetailedJobOut": ".classifierdetailedjobout", "ClassifierDetailedJobOutIntegration": ".classifierdetailedjobout", "ClassifierDetailedJobOutIntegrationTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutObject": ".classifierftmodelout", "ClassifierFTModelOutTypedDict": ".classifierftmodelout", "ClassifierJobOut": ".classifierjobout", "ClassifierJobOutIntegration": ".classifierjobout", "ClassifierJobOutIntegrationTypedDict": ".classifierjobout", - "ClassifierJobOutObject": ".classifierjobout", "ClassifierJobOutStatus": ".classifierjobout", "ClassifierJobOutTypedDict": 
".classifierjobout", "ClassifierTargetIn": ".classifiertargetin", @@ -1840,7 +1720,6 @@ "CompletionDetailedJobOut": ".completiondetailedjobout", "CompletionDetailedJobOutIntegration": ".completiondetailedjobout", "CompletionDetailedJobOutIntegrationTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutObject": ".completiondetailedjobout", "CompletionDetailedJobOutRepository": ".completiondetailedjobout", "CompletionDetailedJobOutRepositoryTypedDict": ".completiondetailedjobout", "CompletionDetailedJobOutStatus": ".completiondetailedjobout", @@ -1848,12 +1727,10 @@ "CompletionEvent": ".completionevent", "CompletionEventTypedDict": ".completionevent", "CompletionFTModelOut": ".completionftmodelout", - "CompletionFTModelOutObject": ".completionftmodelout", "CompletionFTModelOutTypedDict": ".completionftmodelout", "CompletionJobOut": ".completionjobout", "CompletionJobOutIntegration": ".completionjobout", "CompletionJobOutIntegrationTypedDict": ".completionjobout", - "CompletionJobOutObject": ".completionjobout", "CompletionJobOutRepository": ".completionjobout", "CompletionJobOutRepositoryTypedDict": ".completionjobout", "CompletionJobOutStatus": ".completionjobout", @@ -1918,10 +1795,30 @@ "ConversationStreamRequestTypedDict": ".conversationstreamrequest", "ConversationUsageInfo": ".conversationusageinfo", "ConversationUsageInfoTypedDict": ".conversationusageinfo", - "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "CreateFineTuningJobResponse": ".createfinetuningjobop", + "CreateFineTuningJobResponseTypedDict": ".createfinetuningjobop", + "Response": ".createfinetuningjobop", + "ResponseTypedDict": ".createfinetuningjobop", + "CreateOrUpdateAgentAliasRequest": ".createorupdateagentaliasop", + "CreateOrUpdateAgentAliasRequestTypedDict": ".createorupdateagentaliasop", + "DeleteAgentAliasRequest": ".deleteagentaliasop", + "DeleteAgentAliasRequestTypedDict": ".deleteagentaliasop", + "DeleteAgentRequest": ".deleteagentop", + "DeleteAgentRequestTypedDict": ".deleteagentop", + "DeleteConversationRequest": ".deleteconversationop", + "DeleteConversationRequestTypedDict": ".deleteconversationop", + "DeleteDocumentRequest": ".deletedocumentop", + "DeleteDocumentRequestTypedDict": ".deletedocumentop", + "DeleteFileRequest": ".deletefileop", + "DeleteFileRequestTypedDict": ".deletefileop", "DeleteFileOut": ".deletefileout", "DeleteFileOutTypedDict": ".deletefileout", + "DeleteLibraryAccessRequest": ".deletelibraryaccessop", + "DeleteLibraryAccessRequestTypedDict": ".deletelibraryaccessop", + "DeleteLibraryRequest": ".deletelibraryop", + "DeleteLibraryRequestTypedDict": ".deletelibraryop", + "DeleteModelRequest": ".deletemodelop", + "DeleteModelRequestTypedDict": ".deletemodelop", "DeleteModelOut": ".deletemodelout", "DeleteModelOutTypedDict": ".deletemodelout", "DeltaMessage": ".deltamessage", @@ -1941,6 +1838,8 @@ "DocumentURLChunk": ".documenturlchunk", "DocumentURLChunkType": ".documenturlchunk", "DocumentURLChunkTypedDict": ".documenturlchunk", + "DownloadFileRequest": ".downloadfileop", + "DownloadFileRequestTypedDict": ".downloadfileop", "EmbeddingDtype": ".embeddingdtype", "EmbeddingRequest": ".embeddingrequest", "EmbeddingRequestInputs": ".embeddingrequest", @@ -1959,18 +1858,6 @@ "FileChunk": ".filechunk", "FileChunkTypedDict": ".filechunk", "FilePurpose": ".filepurpose", - "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", 
- "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", - "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", - "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", - "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", - "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", - "MultiPartBodyParams": ".files_api_routes_upload_fileop", - "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", "FileSchema": ".fileschema", "FileSchemaTypedDict": ".fileschema", "FileSignedURL": ".filesignedurl", @@ -2013,6 +1900,40 @@ "FunctionResultEntryTypedDict": ".functionresultentry", "FunctionTool": ".functiontool", "FunctionToolTypedDict": ".functiontool", + "GetAgentAgentVersion": ".getagentop", + "GetAgentAgentVersionTypedDict": ".getagentop", + "GetAgentRequest": ".getagentop", + "GetAgentRequestTypedDict": ".getagentop", + "GetAgentVersionRequest": ".getagentversionop", + "GetAgentVersionRequestTypedDict": ".getagentversionop", + "GetBatchJobRequest": ".getbatchjobop", + "GetBatchJobRequestTypedDict": ".getbatchjobop", + "GetConversationHistoryRequest": ".getconversationhistoryop", + "GetConversationHistoryRequestTypedDict": ".getconversationhistoryop", + "GetConversationMessagesRequest": ".getconversationmessagesop", + "GetConversationMessagesRequestTypedDict": ".getconversationmessagesop", + "GetConversationRequest": ".getconversationop", + "GetConversationRequestTypedDict": ".getconversationop", + "ResponseV1ConversationsGet": ".getconversationop", + "ResponseV1ConversationsGetTypedDict": ".getconversationop", + "GetDocumentExtractedTextSignedURLRequest": ".getdocumentextractedtextsignedurlop", + "GetDocumentExtractedTextSignedURLRequestTypedDict": ".getdocumentextractedtextsignedurlop", + "GetDocumentRequest": ".getdocumentop", + "GetDocumentRequestTypedDict": ".getdocumentop", + "GetDocumentSignedURLRequest": ".getdocumentsignedurlop", + "GetDocumentSignedURLRequestTypedDict": ".getdocumentsignedurlop", + "GetDocumentStatusRequest": ".getdocumentstatusop", + "GetDocumentStatusRequestTypedDict": ".getdocumentstatusop", + "GetDocumentTextContentRequest": ".getdocumenttextcontentop", + "GetDocumentTextContentRequestTypedDict": ".getdocumenttextcontentop", + "GetFileSignedURLRequest": ".getfilesignedurlop", + "GetFileSignedURLRequestTypedDict": ".getfilesignedurlop", + "GetFineTuningJobRequest": ".getfinetuningjobop", + "GetFineTuningJobRequestTypedDict": ".getfinetuningjobop", + "GetFineTuningJobResponse": ".getfinetuningjobop", + "GetFineTuningJobResponseTypedDict": ".getfinetuningjobop", + "GetLibraryRequest": ".getlibraryop", + "GetLibraryRequestTypedDict": ".getlibraryop", "GithubRepositoryIn": ".githubrepositoryin", "GithubRepositoryInTypedDict": ".githubrepositoryin", "GithubRepositoryOut": ".githubrepositoryout", @@ -2050,93 +1971,46 @@ "JobInTypedDict": ".jobin", "JobMetadataOut": ".jobmetadataout", "JobMetadataOutTypedDict": ".jobmetadataout", - "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", - 
"JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "ResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningGetFineTuningJobsStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", "JobsOut": ".jobsout", "JobsOutData": ".jobsout", "JobsOutDataTypedDict": ".jobsout", - "JobsOutObject": ".jobsout", "JobsOutTypedDict": 
".jobsout", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "LegacyJobMetadataOut": ".legacyjobmetadataout", - "LegacyJobMetadataOutObject": ".legacyjobmetadataout", "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", - "LibrariesDeleteV1Request": ".libraries_delete_v1op", - "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", - "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", - "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", - "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", - "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", - "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", - "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", - "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", - "DocumentUpload": ".libraries_documents_upload_v1op", - "DocumentUploadTypedDict": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", - "LibrariesGetV1Request": ".libraries_get_v1op", - "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", - "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", - "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", - "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", - "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", - "LibrariesShareListV1Request": ".libraries_share_list_v1op", - "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", - "LibrariesUpdateV1Request": ".libraries_update_v1op", - "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", "LibraryIn": ".libraryin", "LibraryInTypedDict": ".libraryin", "LibraryInUpdate": ".libraryinupdate", "LibraryInUpdateTypedDict": ".libraryinupdate", "LibraryOut": ".libraryout", "LibraryOutTypedDict": ".libraryout", + "ListAgentAliasesRequest": ".listagentaliasesop", + "ListAgentAliasesRequestTypedDict": ".listagentaliasesop", + "ListAgentsRequest": ".listagentsop", + "ListAgentsRequestTypedDict": ".listagentsop", + "ListAgentVersionsRequest": ".listagentversionsop", + "ListAgentVersionsRequestTypedDict": ".listagentversionsop", + "ListBatchJobsRequest": ".listbatchjobsop", + "ListBatchJobsRequestTypedDict": ".listbatchjobsop", + "OrderBy": ".listbatchjobsop", + 
"ListConversationsRequest": ".listconversationsop", + "ListConversationsRequestTypedDict": ".listconversationsop", + "ListConversationsResponse": ".listconversationsop", + "ListConversationsResponseTypedDict": ".listconversationsop", "ListDocumentOut": ".listdocumentout", "ListDocumentOutTypedDict": ".listdocumentout", + "ListDocumentsRequest": ".listdocumentsop", + "ListDocumentsRequestTypedDict": ".listdocumentsop", + "ListFilesRequest": ".listfilesop", + "ListFilesRequestTypedDict": ".listfilesop", "ListFilesOut": ".listfilesout", "ListFilesOutTypedDict": ".listfilesout", + "ListFineTuningJobsRequest": ".listfinetuningjobsop", + "ListFineTuningJobsRequestTypedDict": ".listfinetuningjobsop", + "ListFineTuningJobsStatus": ".listfinetuningjobsop", + "ListLibraryAccessesRequest": ".listlibraryaccessesop", + "ListLibraryAccessesRequestTypedDict": ".listlibraryaccessesop", "ListLibraryOut": ".listlibraryout", "ListLibraryOutTypedDict": ".listlibraryout", "ListSharingOut": ".listsharingout", @@ -2226,6 +2100,8 @@ "ReferenceChunk": ".referencechunk", "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", + "ReprocessDocumentRequest": ".reprocessdocumentop", + "ReprocessDocumentRequestTypedDict": ".reprocessdocumentop", "RequestSource": ".requestsource", "ResponseDoneEvent": ".responsedoneevent", "ResponseDoneEventTypedDict": ".responsedoneevent", @@ -2237,12 +2113,18 @@ "ResponseStartedEvent": ".responsestartedevent", "ResponseStartedEventTypedDict": ".responsestartedevent", "ResponseValidationError": ".responsevalidationerror", - "ResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", - "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RestartConversationRequest": ".restartconversationop", + "RestartConversationRequestTypedDict": ".restartconversationop", + "RestartConversationStreamRequest": ".restartconversationstreamop", + "RestartConversationStreamRequestTypedDict": ".restartconversationstreamop", + "RetrieveFileRequest": ".retrievefileop", + "RetrieveFileRequestTypedDict": ".retrievefileop", "RetrieveFileOut": ".retrievefileout", "RetrieveFileOutTypedDict": ".retrievefileout", + "ResponseRetrieveModelV1ModelsModelIDGet": ".retrievemodelop", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrievemodelop", + "RetrieveModelRequest": ".retrievemodelop", + "RetrieveModelRequestTypedDict": ".retrievemodelop", "SampleType": ".sampletype", "SDKError": ".sdkerror", "Security": ".security", @@ -2256,6 +2138,10 @@ "SharingOutTypedDict": ".sharingout", "Source": ".source", "SSETypes": ".ssetypes", + "StartFineTuningJobRequest": ".startfinetuningjobop", + "StartFineTuningJobRequestTypedDict": ".startfinetuningjobop", + "StartFineTuningJobResponse": ".startfinetuningjobop", + "StartFineTuningJobResponseTypedDict": ".startfinetuningjobop", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", @@ -2332,10 +2218,31 @@ "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", "UnarchiveFTModelOut": ".unarchiveftmodelout", - "UnarchiveFTModelOutObject": ".unarchiveftmodelout", "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", + 
"UnarchiveModelRequest": ".unarchivemodelop", + "UnarchiveModelRequestTypedDict": ".unarchivemodelop", + "UpdateAgentRequest": ".updateagentop", + "UpdateAgentRequestTypedDict": ".updateagentop", + "UpdateAgentVersionRequest": ".updateagentversionop", + "UpdateAgentVersionRequestTypedDict": ".updateagentversionop", + "UpdateDocumentRequest": ".updatedocumentop", + "UpdateDocumentRequestTypedDict": ".updatedocumentop", "UpdateFTModelIn": ".updateftmodelin", "UpdateFTModelInTypedDict": ".updateftmodelin", + "UpdateLibraryRequest": ".updatelibraryop", + "UpdateLibraryRequestTypedDict": ".updatelibraryop", + "UpdateModelRequest": ".updatemodelop", + "UpdateModelRequestTypedDict": ".updatemodelop", + "UpdateModelResponse": ".updatemodelop", + "UpdateModelResponseTypedDict": ".updatemodelop", + "UpdateOrCreateLibraryAccessRequest": ".updateorcreatelibraryaccessop", + "UpdateOrCreateLibraryAccessRequestTypedDict": ".updateorcreatelibraryaccessop", + "DocumentUpload": ".uploaddocumentop", + "DocumentUploadTypedDict": ".uploaddocumentop", + "UploadDocumentRequest": ".uploaddocumentop", + "UploadDocumentRequestTypedDict": ".uploaddocumentop", + "MultiPartBodyParams": ".uploadfileop", + "MultiPartBodyParamsTypedDict": ".uploadfileop", "UploadFileOut": ".uploadfileout", "UploadFileOutTypedDict": ".uploadfileout", "UsageInfo": ".usageinfo", diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py index b2fe3939..05ae24cd 100644 --- a/src/mistralai/client/models/agent.py +++ b/src/mistralai/client/models/agent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1336849c84fb from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -70,6 +71,7 @@ class AgentTypedDict(TypedDict): handoffs: NotRequired[Nullable[List[str]]] metadata: NotRequired[Nullable[Dict[str, Any]]] object: NotRequired[AgentObject] + version_message: NotRequired[Nullable[str]] class Agent(BaseModel): @@ -108,6 +110,8 @@ class Agent(BaseModel): object: Optional[AgentObject] = "agent" + version_message: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -118,8 +122,15 @@ def serialize_model(self, handler): "handoffs", "metadata", "object", + "version_message", + ] + nullable_fields = [ + "instructions", + "description", + "handoffs", + "metadata", + "version_message", ] - nullable_fields = ["instructions", "description", "handoffs", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/client/models/agentaliasresponse.py b/src/mistralai/client/models/agentaliasresponse.py index 4bc8225c..6972af2a 100644 --- a/src/mistralai/client/models/agentaliasresponse.py +++ b/src/mistralai/client/models/agentaliasresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3899a98a55dd from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agentconversation.py b/src/mistralai/client/models/agentconversation.py index 5dfa8c31..a850d54c 100644 --- a/src/mistralai/client/models/agentconversation.py +++ b/src/mistralai/client/models/agentconversation.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1b7d73eddf51 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agentcreationrequest.py b/src/mistralai/client/models/agentcreationrequest.py index 561bef64..898d42a9 100644 --- a/src/mistralai/client/models/agentcreationrequest.py +++ b/src/mistralai/client/models/agentcreationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 35b7f4933b3e from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -58,6 +59,7 @@ class AgentCreationRequestTypedDict(TypedDict): description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] metadata: NotRequired[Nullable[Dict[str, Any]]] + version_message: NotRequired[Nullable[str]] class AgentCreationRequest(BaseModel): @@ -80,6 +82,8 @@ class AgentCreationRequest(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET + version_message: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -89,8 +93,15 @@ def serialize_model(self, handler): "description", "handoffs", "metadata", + "version_message", + ] + nullable_fields = [ + "instructions", + "description", + "handoffs", + "metadata", + "version_message", ] - nullable_fields = ["instructions", "description", "handoffs", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py index 40bb446b..40bf8497 100644 --- a/src/mistralai/client/models/agenthandoffdoneevent.py +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 82628bb5fcea from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agenthandoffentry.py b/src/mistralai/client/models/agenthandoffentry.py index 0b0de13f..b18fe17c 100644 --- a/src/mistralai/client/models/agenthandoffentry.py +++ b/src/mistralai/client/models/agenthandoffentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5030bcaa3a07 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py index 93f56db0..e278aef3 100644 --- a/src/mistralai/client/models/agenthandoffstartedevent.py +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f6093d9b222 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py index 3b045ed6..f4a2d646 100644 --- a/src/mistralai/client/models/agentscompletionrequest.py +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3960bc4c545f from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py index 23920c4e..732e2402 100644 --- a/src/mistralai/client/models/agentscompletionstreamrequest.py +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1b73f90befc2 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/agentupdaterequest.py b/src/mistralai/client/models/agentupdaterequest.py index be93157d..96e209d4 100644 --- a/src/mistralai/client/models/agentupdaterequest.py +++ b/src/mistralai/client/models/agentupdaterequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2d5a3a437819 from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -59,6 +60,7 @@ class AgentUpdateRequestTypedDict(TypedDict): handoffs: NotRequired[Nullable[List[str]]] deployment_chat: NotRequired[Nullable[bool]] metadata: NotRequired[Nullable[Dict[str, Any]]] + version_message: NotRequired[Nullable[str]] class AgentUpdateRequest(BaseModel): @@ -83,6 +85,8 @@ class AgentUpdateRequest(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET + version_message: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -95,6 +99,7 @@ def serialize_model(self, handler): "handoffs", "deployment_chat", "metadata", + "version_message", ] nullable_fields = [ "instructions", @@ -104,6 +109,7 @@ def serialize_model(self, handler): "handoffs", "deployment_chat", "metadata", + "version_message", ] null_default_fields = [] diff --git a/src/mistralai/client/models/apiendpoint.py b/src/mistralai/client/models/apiendpoint.py index a6072d56..a6665c10 100644 --- a/src/mistralai/client/models/apiendpoint.py +++ b/src/mistralai/client/models/apiendpoint.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 00b34ce0a24d from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/agents_api_v1_conversations_appendop.py b/src/mistralai/client/models/appendconversationop.py similarity index 87% rename from src/mistralai/client/models/agents_api_v1_conversations_appendop.py rename to src/mistralai/client/models/appendconversationop.py index 13d07ba9..710b8e1c 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_appendop.py +++ b/src/mistralai/client/models/appendconversationop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
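The agent models above (`Agent`, `AgentCreationRequest`, `AgentUpdateRequest`) all gain `version_message` as `OptionalNullable[str] = UNSET`, listed in both the `optional_fields` and `nullable_fields` of `serialize_model`, so unset, explicit `None`, and a real value serialize differently. A rough usage sketch; the bare constructor call and the server-side effect of an explicit `None` are assumptions, and the import path simply follows the file layout in this diff:

```python
from mistralai.client.models.agentupdaterequest import AgentUpdateRequest

# Field omitted: stays UNSET, and serialize_model() drops it from the
# payload, so the stored version message is left untouched.
untouched = AgentUpdateRequest()

# Explicit None: version_message is in nullable_fields, so the null is
# kept in the payload (and presumably clears the message server-side).
cleared = AgentUpdateRequest(version_message=None)

# Plain value: recorded against the agent version this update creates.
annotated = AgentUpdateRequest(version_message="tighten the system prompt")
```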
DO NOT EDIT.""" +# @generated-id: 1c47dd1e7c7e from __future__ import annotations from .conversationappendrequest import ( @@ -10,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): +class AppendConversationRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation to which we append entries.""" conversation_append_request: ConversationAppendRequestTypedDict -class AgentsAPIV1ConversationsAppendRequest(BaseModel): +class AppendConversationRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/client/models/appendconversationstreamop.py similarity index 87% rename from src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py rename to src/mistralai/client/models/appendconversationstreamop.py index 9f00ffd4..55efca0e 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py +++ b/src/mistralai/client/models/appendconversationstreamop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1ab08b189e9d from __future__ import annotations from .conversationappendstreamrequest import ( @@ -10,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): +class AppendConversationStreamRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation to which we append entries.""" conversation_append_stream_request: ConversationAppendStreamRequestTypedDict -class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): +class AppendConversationStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/archiveftmodelout.py b/src/mistralai/client/models/archiveftmodelout.py index 6108c7e1..3107116c 100644 --- a/src/mistralai/client/models/archiveftmodelout.py +++ b/src/mistralai/client/models/archiveftmodelout.py @@ -1,23 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
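Both conversation-append request models keep the same shape after the rename: a `conversation_id` path parameter annotated with `PathParamMetadata(style="simple", explode=False)`, i.e. RFC 6570 simple-style substitution, plus a body model. A toy illustration of that substitution rule (standalone code; the route template is illustrative and percent-encoding is elided):

```python
def simple_path(template: str, **params: str) -> str:
    """Toy RFC 6570 'simple' substitution: each value is dropped into the
    template verbatim (explode=False, no prefix character)."""
    for name, value in params.items():
        template = template.replace("{" + name + "}", value)
    return template


print(simple_path("/v1/conversations/{conversation_id}", conversation_id="conv_123"))
# -> /v1/conversations/conv_123
```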
DO NOT EDIT.""" +# @generated-id: bab499599d30 from __future__ import annotations from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ArchiveFTModelOutObject = Literal["model",] +from typing_extensions import Annotated, NotRequired, TypedDict class ArchiveFTModelOutTypedDict(TypedDict): id: str - object: NotRequired[ArchiveFTModelOutObject] + object: Literal["model"] archived: NotRequired[bool] class ArchiveFTModelOut(BaseModel): id: str - object: Optional[ArchiveFTModelOutObject] = "model" + OBJECT: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" archived: Optional[bool] = True diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/client/models/archivemodelop.py similarity index 76% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py rename to src/mistralai/client/models/archivemodelop.py index 4536b738..30b4a9bd 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ b/src/mistralai/client/models/archivemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: beefa1df3b7c from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): +class ArchiveModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to archive.""" -class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): +class ArchiveModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/assistantmessage.py b/src/mistralai/client/models/assistantmessage.py index 3ba14ce7..5a4a2085 100644 --- a/src/mistralai/client/models/assistantmessage.py +++ b/src/mistralai/client/models/assistantmessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2b49546e0742 from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py index fae1193c..a5186827 100644 --- a/src/mistralai/client/models/audiochunk.py +++ b/src/mistralai/client/models/audiochunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ce5dce4dced2 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/audioencoding.py b/src/mistralai/client/models/audioencoding.py index 557f53ed..67fec75d 100644 --- a/src/mistralai/client/models/audioencoding.py +++ b/src/mistralai/client/models/audioencoding.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
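`ArchiveFTModelOut` shows the new pattern for `object` fields in full: the old standalone `Literal` alias with a plain default becomes a wire-level constant, checked through an `AfterValidator` and exposed in Python as `OBJECT` with a pydantic alias. A self-contained re-creation of the pattern; `validate_const` below is a stand-in for the helper imported from `mistralai.client.utils`, whose implementation this diff does not show:

```python
from typing import Literal, Optional

import pydantic
from pydantic import BaseModel
from pydantic.functional_validators import AfterValidator
from typing_extensions import Annotated


def validate_const(expected):
    """Assumed behavior of mistralai.client.utils.validate_const:
    reject any value other than the declared constant."""

    def check(value):
        if value is not None and value != expected:
            raise ValueError(f"expected const {expected!r}, got {value!r}")
        return value

    return check


class ArchiveFTModelOutSketch(BaseModel):
    id: str
    OBJECT: Annotated[
        Annotated[
            Optional[Literal["model"]], AfterValidator(validate_const("model"))
        ],
        pydantic.Field(alias="object"),
    ] = "model"


ArchiveFTModelOutSketch.model_validate({"id": "ft-1", "object": "model"})  # passes
# ArchiveFTModelOutSketch.model_validate({"id": "ft-1", "object": "job"})
# -> ValidationError: the Literal (and the const check behind it) only admit "model"
```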
DO NOT EDIT.""" +# @generated-id: b14e6a50f730 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/audioformat.py b/src/mistralai/client/models/audioformat.py index 7ea10b3a..fef87ae7 100644 --- a/src/mistralai/client/models/audioformat.py +++ b/src/mistralai/client/models/audioformat.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c8655712c218 from __future__ import annotations from .audioencoding import AudioEncoding diff --git a/src/mistralai/client/models/audiotranscriptionrequest.py b/src/mistralai/client/models/audiotranscriptionrequest.py index 78a37978..8c47a83c 100644 --- a/src/mistralai/client/models/audiotranscriptionrequest.py +++ b/src/mistralai/client/models/audiotranscriptionrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4148b4d23e7 from __future__ import annotations from .file import File, FileTypedDict diff --git a/src/mistralai/client/models/audiotranscriptionrequeststream.py b/src/mistralai/client/models/audiotranscriptionrequeststream.py index 35064361..a080cee2 100644 --- a/src/mistralai/client/models/audiotranscriptionrequeststream.py +++ b/src/mistralai/client/models/audiotranscriptionrequeststream.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 33a07317a3b3 from __future__ import annotations from .file import File, FileTypedDict diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py index f16607d5..17a3e5c9 100644 --- a/src/mistralai/client/models/basemodelcard.py +++ b/src/mistralai/client/models/basemodelcard.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 556ebdc33276 from __future__ import annotations from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict diff --git a/src/mistralai/client/models/batcherror.py b/src/mistralai/client/models/batcherror.py index a9c8362b..c1bf722a 100644 --- a/src/mistralai/client/models/batcherror.py +++ b/src/mistralai/client/models/batcherror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1563e2a576ec from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/batchjobin.py b/src/mistralai/client/models/batchjobin.py index 39cf70b5..a0c3b914 100644 --- a/src/mistralai/client/models/batchjobin.py +++ b/src/mistralai/client/models/batchjobin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 72b25c2038d4 from __future__ import annotations from .apiendpoint import APIEndpoint diff --git a/src/mistralai/client/models/batchjobout.py b/src/mistralai/client/models/batchjobout.py index 008d43b4..99c2b951 100644 --- a/src/mistralai/client/models/batchjobout.py +++ b/src/mistralai/client/models/batchjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" +# @generated-id: cbf1d872a46e from __future__ import annotations from .batcherror import BatchError, BatchErrorTypedDict @@ -10,12 +11,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobOutObject = Literal["batch",] +from typing_extensions import Annotated, NotRequired, TypedDict class BatchJobOutTypedDict(TypedDict): @@ -29,7 +30,7 @@ class BatchJobOutTypedDict(TypedDict): completed_requests: int succeeded_requests: int failed_requests: int - object: NotRequired[BatchJobOutObject] + object: Literal["batch"] metadata: NotRequired[Nullable[Dict[str, Any]]] model: NotRequired[Nullable[str]] agent_id: NotRequired[Nullable[str]] @@ -61,7 +62,10 @@ class BatchJobOut(BaseModel): failed_requests: int - object: Optional[BatchJobOutObject] = "batch" + OBJECT: Annotated[ + Annotated[Optional[Literal["batch"]], AfterValidator(validate_const("batch"))], + pydantic.Field(alias="object"), + ] = "batch" metadata: OptionalNullable[Dict[str, Any]] = UNSET diff --git a/src/mistralai/client/models/batchjobsout.py b/src/mistralai/client/models/batchjobsout.py index 2654dac0..f65fc040 100644 --- a/src/mistralai/client/models/batchjobsout.py +++ b/src/mistralai/client/models/batchjobsout.py @@ -1,19 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 20b2516e7efa from __future__ import annotations from .batchjobout import BatchJobOut, BatchJobOutTypedDict from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobsOutObject = Literal["list",] +from typing_extensions import Annotated, NotRequired, TypedDict class BatchJobsOutTypedDict(TypedDict): total: int data: NotRequired[List[BatchJobOutTypedDict]] - object: NotRequired[BatchJobsOutObject] + object: Literal["list"] class BatchJobsOut(BaseModel): @@ -21,4 +22,7 @@ class BatchJobsOut(BaseModel): data: Optional[List[BatchJobOut]] = None - object: Optional[BatchJobsOutObject] = "list" + OBJECT: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" diff --git a/src/mistralai/client/models/batchjobstatus.py b/src/mistralai/client/models/batchjobstatus.py index 1ba3dd55..bd77faa2 100644 --- a/src/mistralai/client/models/batchjobstatus.py +++ b/src/mistralai/client/models/batchjobstatus.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 61e08cf5eea9 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/batchrequest.py b/src/mistralai/client/models/batchrequest.py index 24f50a9a..41c45234 100644 --- a/src/mistralai/client/models/batchrequest.py +++ b/src/mistralai/client/models/batchrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6f36819eeb46 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/builtinconnectors.py b/src/mistralai/client/models/builtinconnectors.py index 4a98b45b..ecf60d3c 100644 --- a/src/mistralai/client/models/builtinconnectors.py +++ b/src/mistralai/client/models/builtinconnectors.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2d276ce938dc from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/client/models/cancelbatchjobop.py similarity index 76% rename from src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py rename to src/mistralai/client/models/cancelbatchjobop.py index 21a04f73..cd94ee86 100644 --- a/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py +++ b/src/mistralai/client/models/cancelbatchjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cebac10b56a9 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): +class CancelBatchJobRequestTypedDict(TypedDict): job_id: str -class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): +class CancelBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/cancelfinetuningjobop.py similarity index 73% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py rename to src/mistralai/client/models/cancelfinetuningjobop.py index 5d9c026b..ddd445bb 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/client/models/cancelfinetuningjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c9a1b39f0d02 from __future__ import annotations from .classifierdetailedjobout import ( @@ -16,26 +17,26 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): +class CancelFineTuningJobRequestTypedDict(TypedDict): job_id: str r"""The ID of the job to cancel.""" -class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): +class CancelFineTuningJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] r"""The ID of the job to cancel.""" -JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", +CancelFineTuningJobResponseTypedDict = TypeAliasType( + "CancelFineTuningJobResponseTypedDict", Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ +CancelFineTuningJobResponse = Annotated[ Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], Field(discriminator="JOB_TYPE"), ] diff --git a/src/mistralai/client/models/chatclassificationrequest.py b/src/mistralai/client/models/chatclassificationrequest.py index 45081022..8b6d07b9 100644 --- a/src/mistralai/client/models/chatclassificationrequest.py +++ b/src/mistralai/client/models/chatclassificationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: afd9cdc71834 from __future__ import annotations from .inputs import Inputs, InputsTypedDict diff --git a/src/mistralai/client/models/chatcompletionchoice.py b/src/mistralai/client/models/chatcompletionchoice.py index 5752f7c1..2c515f6e 100644 --- a/src/mistralai/client/models/chatcompletionchoice.py +++ b/src/mistralai/client/models/chatcompletionchoice.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7e6a512f6a04 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py index 62c375e0..4f7d071b 100644 --- a/src/mistralai/client/models/chatcompletionrequest.py +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9979805d8c38 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/chatcompletionresponse.py b/src/mistralai/client/models/chatcompletionresponse.py index 60a1f561..7092bbc1 100644 --- a/src/mistralai/client/models/chatcompletionresponse.py +++ b/src/mistralai/client/models/chatcompletionresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
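The renamed `CancelFineTuningJobResponse` keeps the discriminated-union shape: pydantic selects `ClassifierDetailedJobOut` or `CompletionDetailedJobOut` from the field behind the `JOB_TYPE` discriminator instead of trying each branch in order. A rough sketch with toy stand-ins for the two job models (`job_type` as the wire-level field name is an assumption based on the discriminator):

```python
from typing import Literal, Union

from pydantic import BaseModel, Field, TypeAdapter
from typing_extensions import Annotated


class ClassifierJobSketch(BaseModel):
    job_type: Literal["classifier"]  # stand-in for the aliased JOB_TYPE field
    id: str


class CompletionJobSketch(BaseModel):
    job_type: Literal["completion"]
    id: str


# Same shape as the CancelFineTuningJobResponse alias above: pydantic picks
# the branch from the discriminator value instead of trying each in order.
JobResponseSketch = Annotated[
    Union[ClassifierJobSketch, CompletionJobSketch],
    Field(discriminator="job_type"),
]

job = TypeAdapter(JobResponseSketch).validate_python(
    {"job_type": "classifier", "id": "ftjob-1"}
)
print(type(job).__name__)  # -> ClassifierJobSketch
```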
DO NOT EDIT.""" +# @generated-id: 669d996b8e82 from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py index 4e5c281d..ec7d2ae1 100644 --- a/src/mistralai/client/models/chatcompletionstreamrequest.py +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 18cb2b2415d4 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py index 4e2611c8..a8d021e8 100644 --- a/src/mistralai/client/models/chatmoderationrequest.py +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 057aecb07275 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/checkpointout.py b/src/mistralai/client/models/checkpointout.py index 89189ed1..3e8d90e9 100644 --- a/src/mistralai/client/models/checkpointout.py +++ b/src/mistralai/client/models/checkpointout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3866fe32cd7c from __future__ import annotations from .metricout import MetricOut, MetricOutTypedDict diff --git a/src/mistralai/client/models/classificationrequest.py b/src/mistralai/client/models/classificationrequest.py index c724ff53..903706c3 100644 --- a/src/mistralai/client/models/classificationrequest.py +++ b/src/mistralai/client/models/classificationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6942fe3de24a from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/classificationresponse.py b/src/mistralai/client/models/classificationresponse.py index 4bc21a58..d2f09f43 100644 --- a/src/mistralai/client/models/classificationresponse.py +++ b/src/mistralai/client/models/classificationresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: eaf279db1109 from __future__ import annotations from .classificationtargetresult import ( diff --git a/src/mistralai/client/models/classificationtargetresult.py b/src/mistralai/client/models/classificationtargetresult.py index 89a137c3..6c7d6231 100644 --- a/src/mistralai/client/models/classificationtargetresult.py +++ b/src/mistralai/client/models/classificationtargetresult.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" +# @generated-id: 2445f12b2a57 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/classifierdetailedjobout.py b/src/mistralai/client/models/classifierdetailedjobout.py index ffe99270..bc5c5381 100644 --- a/src/mistralai/client/models/classifierdetailedjobout.py +++ b/src/mistralai/client/models/classifierdetailedjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d8daeb39ef9f from __future__ import annotations from .checkpointout import CheckpointOut, CheckpointOutTypedDict @@ -43,9 +44,6 @@ ] -ClassifierDetailedJobOutObject = Literal["job",] - - ClassifierDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict @@ -56,7 +54,6 @@ class ClassifierDetailedJobOutTypedDict(TypedDict): id: str auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: ClassifierDetailedJobOutStatus created_at: int modified_at: int @@ -64,7 +61,7 @@ class ClassifierDetailedJobOutTypedDict(TypedDict): hyperparameters: ClassifierTrainingParametersTypedDict classifier_targets: List[ClassifierTargetOutTypedDict] validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[ClassifierDetailedJobOutObject] + object: Literal["job"] fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[ @@ -84,7 +81,6 @@ class ClassifierDetailedJobOut(BaseModel): auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: ClassifierDetailedJobOutStatus @@ -100,7 +96,10 @@ class ClassifierDetailedJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET - object: Optional[ClassifierDetailedJobOutObject] = "job" + OBJECT: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/classifierftmodelout.py b/src/mistralai/client/models/classifierftmodelout.py index c6d34167..182f4954 100644 --- a/src/mistralai/client/models/classifierftmodelout.py +++ b/src/mistralai/client/models/classifierftmodelout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2903a7123b06 from __future__ import annotations from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict @@ -21,9 +22,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -ClassifierFTModelOutObject = Literal["model",] - - class ClassifierFTModelOutTypedDict(TypedDict): id: str created: int @@ -35,7 +33,7 @@ class ClassifierFTModelOutTypedDict(TypedDict): capabilities: FTModelCapabilitiesOutTypedDict job: str classifier_targets: List[ClassifierTargetOutTypedDict] - object: NotRequired[ClassifierFTModelOutObject] + object: Literal["model"] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] @@ -64,7 +62,10 @@ class ClassifierFTModelOut(BaseModel): classifier_targets: List[ClassifierTargetOut] - object: Optional[ClassifierFTModelOutObject] = "model" + OBJECT: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" name: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/classifierjobout.py b/src/mistralai/client/models/classifierjobout.py index 1390aea1..03a5b11c 100644 --- a/src/mistralai/client/models/classifierjobout.py +++ b/src/mistralai/client/models/classifierjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e19e9c4416cc from __future__ import annotations from .classifiertrainingparameters import ( @@ -41,10 +42,6 @@ r"""The current status of the fine-tuning job.""" -ClassifierJobOutObject = Literal["job",] -r"""The object type of the fine-tuning job.""" - - ClassifierJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict @@ -56,7 +53,6 @@ class ClassifierJobOutTypedDict(TypedDict): r"""The ID of the job.""" auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: ClassifierJobOutStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -68,7 +64,7 @@ class ClassifierJobOutTypedDict(TypedDict): hyperparameters: ClassifierTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[ClassifierJobOutObject] + object: Literal["job"] r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" @@ -90,7 +86,6 @@ class ClassifierJobOut(BaseModel): auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: ClassifierJobOutStatus r"""The current status of the fine-tuning job.""" @@ -109,7 +104,10 @@ class ClassifierJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - object: Optional[ClassifierJobOutObject] = "job" + OBJECT: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/classifiertargetin.py b/src/mistralai/client/models/classifiertargetin.py index 231ee21e..b250109b 100644 --- a/src/mistralai/client/models/classifiertargetin.py +++ b/src/mistralai/client/models/classifiertargetin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ed021de1c06c from __future__ import annotations from .ftclassifierlossfunction import FTClassifierLossFunction diff --git a/src/mistralai/client/models/classifiertargetout.py b/src/mistralai/client/models/classifiertargetout.py index 957104a7..3d41a4d9 100644 --- a/src/mistralai/client/models/classifiertargetout.py +++ b/src/mistralai/client/models/classifiertargetout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5131f55abefe from __future__ import annotations from .ftclassifierlossfunction import FTClassifierLossFunction diff --git a/src/mistralai/client/models/classifiertrainingparameters.py b/src/mistralai/client/models/classifiertrainingparameters.py index 60f53c37..f360eda5 100644 --- a/src/mistralai/client/models/classifiertrainingparameters.py +++ b/src/mistralai/client/models/classifiertrainingparameters.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4000b05e3b8d from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/classifiertrainingparametersin.py b/src/mistralai/client/models/classifiertrainingparametersin.py index e24c9dde..85360a7e 100644 --- a/src/mistralai/client/models/classifiertrainingparametersin.py +++ b/src/mistralai/client/models/classifiertrainingparametersin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4b33d5cf0345 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py index 2f34cbda..f69c7a57 100644 --- a/src/mistralai/client/models/codeinterpretertool.py +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
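Per the docstrings above, `fine_tuned_model` stays `null` while a job is still running, so callers should gate on it (or on `status`) rather than assume it is populated. A polling sketch; the `client.fine_tuning.jobs.get(...)` accessor and the terminal status values are hypothetical, used for illustration only:

```python
import time


def wait_for_fine_tuned_model(client, job_id: str, poll_seconds: float = 30.0) -> str:
    """Poll a fine-tuning job until fine_tuned_model is populated.

    fine_tuned_model is documented above as null while the job is still
    running, so a terminal failure state must abort the wait.
    """
    while True:
        job = client.fine_tuning.jobs.get(job_id=job_id)  # hypothetical accessor
        if job.fine_tuned_model is not None:
            return job.fine_tuned_model
        if str(job.status) in ("FAILED", "CANCELLED"):  # status values assumed
            raise RuntimeError(f"job {job_id} ended in status {job.status}")
        time.sleep(poll_seconds)
```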
DO NOT EDIT.""" +# @generated-id: 950cd8f4ad49 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/completionargs.py b/src/mistralai/client/models/completionargs.py index 010910f6..918832ac 100644 --- a/src/mistralai/client/models/completionargs.py +++ b/src/mistralai/client/models/completionargs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3db008bcddca from __future__ import annotations from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict diff --git a/src/mistralai/client/models/completionargsstop.py b/src/mistralai/client/models/completionargsstop.py index de7a0956..39c858e6 100644 --- a/src/mistralai/client/models/completionargsstop.py +++ b/src/mistralai/client/models/completionargsstop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5f339214501d from __future__ import annotations from typing import List, Union diff --git a/src/mistralai/client/models/completionchunk.py b/src/mistralai/client/models/completionchunk.py index 9790db6f..67f447d0 100644 --- a/src/mistralai/client/models/completionchunk.py +++ b/src/mistralai/client/models/completionchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d786b44926f4 from __future__ import annotations from .completionresponsestreamchoice import ( diff --git a/src/mistralai/client/models/completiondetailedjobout.py b/src/mistralai/client/models/completiondetailedjobout.py index ea444b8b..cd3a86ee 100644 --- a/src/mistralai/client/models/completiondetailedjobout.py +++ b/src/mistralai/client/models/completiondetailedjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9bc38dcfbddf from __future__ import annotations from .checkpointout import CheckpointOut, CheckpointOutTypedDict @@ -43,9 +44,6 @@ ] -CompletionDetailedJobOutObject = Literal["job",] - - CompletionDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict @@ -62,14 +60,13 @@ class CompletionDetailedJobOutTypedDict(TypedDict): id: str auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: CompletionDetailedJobOutStatus created_at: int modified_at: int training_files: List[str] hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[CompletionDetailedJobOutObject] + object: Literal["job"] fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[ @@ -90,7 +87,6 @@ class CompletionDetailedJobOut(BaseModel): auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: CompletionDetailedJobOutStatus @@ -104,7 +100,10 @@ class CompletionDetailedJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET - object: Optional[CompletionDetailedJobOutObject] = "job" + OBJECT: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/completionevent.py b/src/mistralai/client/models/completionevent.py index 52db911e..3b90ab0c 100644 --- a/src/mistralai/client/models/completionevent.py +++ b/src/mistralai/client/models/completionevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c68817e7e190 from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict diff --git a/src/mistralai/client/models/completionftmodelout.py b/src/mistralai/client/models/completionftmodelout.py index 92f530af..7ecbf54a 100644 --- a/src/mistralai/client/models/completionftmodelout.py +++ b/src/mistralai/client/models/completionftmodelout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0f5277833b3e from __future__ import annotations from .ftmodelcapabilitiesout import ( @@ -20,9 +21,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -CompletionFTModelOutObject = Literal["model",] - - class CompletionFTModelOutTypedDict(TypedDict): id: str created: int @@ -33,7 +31,7 @@ class CompletionFTModelOutTypedDict(TypedDict): archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str - object: NotRequired[CompletionFTModelOutObject] + object: Literal["model"] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] @@ -60,7 +58,10 @@ class CompletionFTModelOut(BaseModel): job: str - object: Optional[CompletionFTModelOutObject] = "model" + OBJECT: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" name: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/completionjobout.py b/src/mistralai/client/models/completionjobout.py index 1628d8bb..42e5f6c6 100644 --- a/src/mistralai/client/models/completionjobout.py +++ b/src/mistralai/client/models/completionjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 712e6c524f9a from __future__ import annotations from .completiontrainingparameters import ( @@ -42,10 +43,6 @@ r"""The current status of the fine-tuning job.""" -CompletionJobOutObject = Literal["job",] -r"""The object type of the fine-tuning job.""" - - CompletionJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict @@ -63,7 +60,6 @@ class CompletionJobOutTypedDict(TypedDict): r"""The ID of the job.""" auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: CompletionJobOutStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -75,7 +71,7 @@ class CompletionJobOutTypedDict(TypedDict): hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[CompletionJobOutObject] + object: Literal["job"] r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" @@ -98,7 +94,6 @@ class CompletionJobOut(BaseModel): auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: CompletionJobOutStatus r"""The current status of the fine-tuning job.""" @@ -117,7 +112,10 @@ class CompletionJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - object: Optional[CompletionJobOutObject] = "job" + OBJECT: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/completionresponsestreamchoice.py b/src/mistralai/client/models/completionresponsestreamchoice.py index 1b8d6fac..119a9690 100644 --- a/src/mistralai/client/models/completionresponsestreamchoice.py +++ b/src/mistralai/client/models/completionresponsestreamchoice.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5969a6bc07f3 from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict diff --git a/src/mistralai/client/models/completiontrainingparameters.py b/src/mistralai/client/models/completiontrainingparameters.py index 36b285ab..4b846b1b 100644 --- a/src/mistralai/client/models/completiontrainingparameters.py +++ b/src/mistralai/client/models/completiontrainingparameters.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: be202ea0d5a6 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/completiontrainingparametersin.py b/src/mistralai/client/models/completiontrainingparametersin.py index d0315d99..20b74ad9 100644 --- a/src/mistralai/client/models/completiontrainingparametersin.py +++ b/src/mistralai/client/models/completiontrainingparametersin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0df22b873b5f from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py index 0a25423f..eff4b8c6 100644 --- a/src/mistralai/client/models/contentchunk.py +++ b/src/mistralai/client/models/contentchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c007f5ee0325 from __future__ import annotations from .audiochunk import AudioChunk, AudioChunkTypedDict diff --git a/src/mistralai/client/models/conversationappendrequest.py b/src/mistralai/client/models/conversationappendrequest.py index 867c0a41..0f07475e 100644 --- a/src/mistralai/client/models/conversationappendrequest.py +++ b/src/mistralai/client/models/conversationappendrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
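Note on the fine-tuning hunks above: the generated job and model classes drop the optional one-value `Literal` aliases (`CompletionDetailedJobOutObject`, `CompletionFTModelOutObject`, `CompletionJobOutObject`) in favour of a required `Literal` in the TypedDicts and a const-validated `OBJECT` field aliased to `object` on the Pydantic models. The following is a minimal, self-contained sketch of that field pattern; the local `validate_const` below is a stand-in for `mistralai.client.utils.validate_const`, whose exact behaviour is assumed rather than taken from this patch.

```python
# Hypothetical, self-contained sketch of the const-validated "object" field pattern.
from typing import Literal, Optional

import pydantic
from pydantic import BaseModel
from pydantic.functional_validators import AfterValidator
from typing_extensions import Annotated


def validate_const(value):
    # Assumed behaviour: reject any incoming value other than the declared constant.
    def check(v):
        if v != value:
            raise ValueError(f"expected const {value!r}, got {v!r}")
        return v

    return check


class JobOutSketch(BaseModel):
    id: str

    OBJECT: Annotated[
        Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))],
        pydantic.Field(alias="object"),
    ] = "job"


print(JobOutSketch.model_validate({"id": "ft-123", "object": "job"}).OBJECT)  # "job"
# Validating {"id": "ft-123", "object": "model"} would fail instead of being accepted.
```

The sketch only illustrates the alias/validator mechanics; the generated models also carry the nullable/unset handling omitted here.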
DO NOT EDIT.""" +# @generated-id: 81ce529e0865 from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict diff --git a/src/mistralai/client/models/conversationappendstreamrequest.py b/src/mistralai/client/models/conversationappendstreamrequest.py index f51407bf..a0d46f72 100644 --- a/src/mistralai/client/models/conversationappendstreamrequest.py +++ b/src/mistralai/client/models/conversationappendstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 27ada745e6ad from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py index 1c2b4592..f2476038 100644 --- a/src/mistralai/client/models/conversationevents.py +++ b/src/mistralai/client/models/conversationevents.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8c8b08d853f6 from __future__ import annotations from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py index 83e860f2..92d6cbf9 100644 --- a/src/mistralai/client/models/conversationhistory.py +++ b/src/mistralai/client/models/conversationhistory.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 60a51ff1682b from __future__ import annotations from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict diff --git a/src/mistralai/client/models/conversationinputs.py b/src/mistralai/client/models/conversationinputs.py index 4d30cd76..7ce3ffc3 100644 --- a/src/mistralai/client/models/conversationinputs.py +++ b/src/mistralai/client/models/conversationinputs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 711b769f2c40 from __future__ import annotations from .inputentries import InputEntries, InputEntriesTypedDict diff --git a/src/mistralai/client/models/conversationmessages.py b/src/mistralai/client/models/conversationmessages.py index 1ea05369..1aa294a4 100644 --- a/src/mistralai/client/models/conversationmessages.py +++ b/src/mistralai/client/models/conversationmessages.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 011c39501c26 from __future__ import annotations from .messageentries import MessageEntries, MessageEntriesTypedDict diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py index dd66c6ce..2005be82 100644 --- a/src/mistralai/client/models/conversationrequest.py +++ b/src/mistralai/client/models/conversationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 58e3ae67f149 from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py index 0a11fff8..24598ef3 100644 --- a/src/mistralai/client/models/conversationresponse.py +++ b/src/mistralai/client/models/conversationresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ad7a8472c7bf from __future__ import annotations from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py index aa2bf7b0..35d30993 100644 --- a/src/mistralai/client/models/conversationrestartrequest.py +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 681d90d50514 from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py index 689815eb..0ddfb130 100644 --- a/src/mistralai/client/models/conversationrestartstreamrequest.py +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 521c2b5bfb2b from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py index 9b8d0c44..379a8f28 100644 --- a/src/mistralai/client/models/conversationstreamrequest.py +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 58d633507527 from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict diff --git a/src/mistralai/client/models/conversationusageinfo.py b/src/mistralai/client/models/conversationusageinfo.py index 7a818c89..98db0f16 100644 --- a/src/mistralai/client/models/conversationusageinfo.py +++ b/src/mistralai/client/models/conversationusageinfo.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6685e3b50b50 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/createfinetuningjobop.py similarity index 71% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py rename to src/mistralai/client/models/createfinetuningjobop.py index c54aaa5e..f55deef5 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ b/src/mistralai/client/models/createfinetuningjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fd3c305df250 from __future__ import annotations from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict @@ -19,15 +20,14 @@ ] -JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", +CreateFineTuningJobResponseTypedDict = TypeAliasType( + "CreateFineTuningJobResponseTypedDict", Union[LegacyJobMetadataOutTypedDict, ResponseTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, Response], +CreateFineTuningJobResponse = TypeAliasType( + "CreateFineTuningJobResponse", Union[LegacyJobMetadataOut, Response] ) r"""OK""" diff --git a/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/client/models/createorupdateagentaliasop.py similarity index 83% rename from src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py rename to src/mistralai/client/models/createorupdateagentaliasop.py index 33da325c..cde1dd05 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py +++ b/src/mistralai/client/models/createorupdateagentaliasop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a79cf28bda01 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,13 +7,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): +class CreateOrUpdateAgentAliasRequestTypedDict(TypedDict): agent_id: str alias: str version: int -class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): +class CreateOrUpdateAgentAliasRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deleteagentaliasop.py b/src/mistralai/client/models/deleteagentaliasop.py new file mode 100644 index 00000000..c52d099e --- /dev/null +++ b/src/mistralai/client/models/deleteagentaliasop.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4d0d7f75b24 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteAgentAliasRequestTypedDict(TypedDict): + agent_id: str + alias: str + + +class DeleteAgentAliasRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + alias: Annotated[ + str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_deleteop.py b/src/mistralai/client/models/deleteagentop.py similarity index 78% rename from src/mistralai/client/models/agents_api_v1_agents_deleteop.py rename to src/mistralai/client/models/deleteagentop.py index 58fe902f..8b14bca7 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_deleteop.py +++ b/src/mistralai/client/models/deleteagentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 089fb7f87aea from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): +class DeleteAgentRequestTypedDict(TypedDict): agent_id: str -class AgentsAPIV1AgentsDeleteRequest(BaseModel): +class DeleteAgentRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/client/models/deleteconversationop.py similarity index 81% rename from src/mistralai/client/models/agents_api_v1_conversations_deleteop.py rename to src/mistralai/client/models/deleteconversationop.py index 81066f90..39607f40 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py +++ b/src/mistralai/client/models/deleteconversationop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 86fefc353db0 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): +class DeleteConversationRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching metadata.""" -class AgentsAPIV1ConversationsDeleteRequest(BaseModel): +class DeleteConversationRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_reprocess_v1op.py b/src/mistralai/client/models/deletedocumentop.py similarity index 82% rename from src/mistralai/client/models/libraries_documents_reprocess_v1op.py rename to src/mistralai/client/models/deletedocumentop.py index 8aee7552..400070a4 100644 --- a/src/mistralai/client/models/libraries_documents_reprocess_v1op.py +++ b/src/mistralai/client/models/deletedocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 62522db1ccf2 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): +class DeleteDocumentRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsReprocessV1Request(BaseModel): +class DeleteDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_delete_fileop.py b/src/mistralai/client/models/deletefileop.py similarity index 78% rename from src/mistralai/client/models/files_api_routes_delete_fileop.py rename to src/mistralai/client/models/deletefileop.py index b7174866..4feb7812 100644 --- a/src/mistralai/client/models/files_api_routes_delete_fileop.py +++ b/src/mistralai/client/models/deletefileop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 286b4e583638 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): +class DeleteFileRequestTypedDict(TypedDict): file_id: str -class FilesAPIRoutesDeleteFileRequest(BaseModel): +class DeleteFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletefileout.py b/src/mistralai/client/models/deletefileout.py index b25538be..c721f32c 100644 --- a/src/mistralai/client/models/deletefileout.py +++ b/src/mistralai/client/models/deletefileout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5578701e7327 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/libraries_share_delete_v1op.py b/src/mistralai/client/models/deletelibraryaccessop.py similarity index 83% rename from src/mistralai/client/models/libraries_share_delete_v1op.py rename to src/mistralai/client/models/deletelibraryaccessop.py index 620527d5..ca14c3ff 100644 --- a/src/mistralai/client/models/libraries_share_delete_v1op.py +++ b/src/mistralai/client/models/deletelibraryaccessop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: df80945bcf19 from __future__ import annotations from .sharingdelete import SharingDelete, SharingDeleteTypedDict @@ -7,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesShareDeleteV1RequestTypedDict(TypedDict): +class DeleteLibraryAccessRequestTypedDict(TypedDict): library_id: str sharing_delete: SharingDeleteTypedDict -class LibrariesShareDeleteV1Request(BaseModel): +class DeleteLibraryAccessRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_get_v1op.py b/src/mistralai/client/models/deletelibraryop.py similarity index 77% rename from src/mistralai/client/models/libraries_get_v1op.py rename to src/mistralai/client/models/deletelibraryop.py index 83ae377d..5eb6fc31 100644 --- a/src/mistralai/client/models/libraries_get_v1op.py +++ b/src/mistralai/client/models/deletelibraryop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cd0ce9bf8d51 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class LibrariesGetV1RequestTypedDict(TypedDict): +class DeleteLibraryRequestTypedDict(TypedDict): library_id: str -class LibrariesGetV1Request(BaseModel): +class DeleteLibraryRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/client/models/deletemodelop.py similarity index 79% rename from src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py rename to src/mistralai/client/models/deletemodelop.py index 1cd36128..55c4b242 100644 --- a/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py +++ b/src/mistralai/client/models/deletemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2c494d99a44d from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): +class DeleteModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to delete.""" -class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): +class DeleteModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelout.py index 5aa8b68f..bf22ed17 100644 --- a/src/mistralai/client/models/deletemodelout.py +++ b/src/mistralai/client/models/deletemodelout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ef6a1671c739 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py index fc08d62a..fbb8231a 100644 --- a/src/mistralai/client/models/deltamessage.py +++ b/src/mistralai/client/models/deltamessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 68f53d67a140 from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py index 21eab39e..ff0f7393 100644 --- a/src/mistralai/client/models/documentlibrarytool.py +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3eb3c218f457 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/documentout.py b/src/mistralai/client/models/documentout.py index 39d0aa2a..3b1a5713 100644 --- a/src/mistralai/client/models/documentout.py +++ b/src/mistralai/client/models/documentout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 7a85b9dca506 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/documenttextcontent.py b/src/mistralai/client/models/documenttextcontent.py index b1c1aa07..b6904cb4 100644 --- a/src/mistralai/client/models/documenttextcontent.py +++ b/src/mistralai/client/models/documenttextcontent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e730005e44cb from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/documentupdatein.py b/src/mistralai/client/models/documentupdatein.py index 02022b89..669554de 100644 --- a/src/mistralai/client/models/documentupdatein.py +++ b/src/mistralai/client/models/documentupdatein.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d19c1b26a875 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/documenturlchunk.py b/src/mistralai/client/models/documenturlchunk.py index 00eb5535..304cde2b 100644 --- a/src/mistralai/client/models/documenturlchunk.py +++ b/src/mistralai/client/models/documenturlchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4309807f6048 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/files_api_routes_download_fileop.py b/src/mistralai/client/models/downloadfileop.py similarity index 77% rename from src/mistralai/client/models/files_api_routes_download_fileop.py rename to src/mistralai/client/models/downloadfileop.py index fa9e491a..fcdc01d6 100644 --- a/src/mistralai/client/models/files_api_routes_download_fileop.py +++ b/src/mistralai/client/models/downloadfileop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4d051f08057d from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): +class DownloadFileRequestTypedDict(TypedDict): file_id: str -class FilesAPIRoutesDownloadFileRequest(BaseModel): +class DownloadFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/embeddingdtype.py b/src/mistralai/client/models/embeddingdtype.py index 26eee779..732c4ebe 100644 --- a/src/mistralai/client/models/embeddingdtype.py +++ b/src/mistralai/client/models/embeddingdtype.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 77f9526a78df from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py index 1dfe97c8..f4537ffa 100644 --- a/src/mistralai/client/models/embeddingrequest.py +++ b/src/mistralai/client/models/embeddingrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: eadbe3f9040c from __future__ import annotations from .embeddingdtype import EmbeddingDtype diff --git a/src/mistralai/client/models/embeddingresponse.py b/src/mistralai/client/models/embeddingresponse.py index 64a28ea9..6ffd6894 100644 --- a/src/mistralai/client/models/embeddingresponse.py +++ b/src/mistralai/client/models/embeddingresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f7d790e84b65 from __future__ import annotations from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict diff --git a/src/mistralai/client/models/embeddingresponsedata.py b/src/mistralai/client/models/embeddingresponsedata.py index ebd0bf7b..a689b290 100644 --- a/src/mistralai/client/models/embeddingresponsedata.py +++ b/src/mistralai/client/models/embeddingresponsedata.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d6ead6f3803 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/encodingformat.py b/src/mistralai/client/models/encodingformat.py index be6c1a14..4a39d029 100644 --- a/src/mistralai/client/models/encodingformat.py +++ b/src/mistralai/client/models/encodingformat.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b51ec296cc92 from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/entitytype.py b/src/mistralai/client/models/entitytype.py index 9c16f4a1..56d82cbe 100644 --- a/src/mistralai/client/models/entitytype.py +++ b/src/mistralai/client/models/entitytype.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 62d6a6a13288 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/eventout.py b/src/mistralai/client/models/eventout.py index 5e118d45..a0247555 100644 --- a/src/mistralai/client/models/eventout.py +++ b/src/mistralai/client/models/eventout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: da8ad645a9cb from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/file.py b/src/mistralai/client/models/file.py index a8bbc6fa..dbbc00b5 100644 --- a/src/mistralai/client/models/file.py +++ b/src/mistralai/client/models/file.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f972c39edfcf from __future__ import annotations import io diff --git a/src/mistralai/client/models/filechunk.py b/src/mistralai/client/models/filechunk.py index d8b96f69..43ef22f8 100644 --- a/src/mistralai/client/models/filechunk.py +++ b/src/mistralai/client/models/filechunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ff3c2d33ab1e from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/filepurpose.py b/src/mistralai/client/models/filepurpose.py index eef1b089..49a5568f 100644 --- a/src/mistralai/client/models/filepurpose.py +++ b/src/mistralai/client/models/filepurpose.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a11e7f9f2d45 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py index 9ecde454..cbe9b0d1 100644 --- a/src/mistralai/client/models/fileschema.py +++ b/src/mistralai/client/models/fileschema.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 19cde41ca32a from __future__ import annotations from .filepurpose import FilePurpose diff --git a/src/mistralai/client/models/filesignedurl.py b/src/mistralai/client/models/filesignedurl.py index cbca9847..53dff812 100644 --- a/src/mistralai/client/models/filesignedurl.py +++ b/src/mistralai/client/models/filesignedurl.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a1754c725163 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/fimcompletionrequest.py b/src/mistralai/client/models/fimcompletionrequest.py index c9eca0af..e2f60327 100644 --- a/src/mistralai/client/models/fimcompletionrequest.py +++ b/src/mistralai/client/models/fimcompletionrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cf3558adc3ab from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/fimcompletionresponse.py b/src/mistralai/client/models/fimcompletionresponse.py index 8a2eda0c..1345a116 100644 --- a/src/mistralai/client/models/fimcompletionresponse.py +++ b/src/mistralai/client/models/fimcompletionresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b860d2ba771e from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict diff --git a/src/mistralai/client/models/fimcompletionstreamrequest.py b/src/mistralai/client/models/fimcompletionstreamrequest.py index 29543802..480ed17a 100644 --- a/src/mistralai/client/models/fimcompletionstreamrequest.py +++ b/src/mistralai/client/models/fimcompletionstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1d1ee09f1913 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/finetuneablemodeltype.py b/src/mistralai/client/models/finetuneablemodeltype.py index f5b8b2ed..7b924bd7 100644 --- a/src/mistralai/client/models/finetuneablemodeltype.py +++ b/src/mistralai/client/models/finetuneablemodeltype.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 05e097395df3 from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/ftclassifierlossfunction.py b/src/mistralai/client/models/ftclassifierlossfunction.py index e6781a5e..ccb0f21b 100644 --- a/src/mistralai/client/models/ftclassifierlossfunction.py +++ b/src/mistralai/client/models/ftclassifierlossfunction.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d21e2a36ab1f from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/ftmodelcapabilitiesout.py b/src/mistralai/client/models/ftmodelcapabilitiesout.py index be31aa3c..42269b78 100644 --- a/src/mistralai/client/models/ftmodelcapabilitiesout.py +++ b/src/mistralai/client/models/ftmodelcapabilitiesout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f70517be97d4 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py index 06f088ec..570e95e2 100644 --- a/src/mistralai/client/models/ftmodelcard.py +++ b/src/mistralai/client/models/ftmodelcard.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c4f15eed2ca2 from __future__ import annotations from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict diff --git a/src/mistralai/client/models/function.py b/src/mistralai/client/models/function.py index 6e2b52ed..3632c1af 100644 --- a/src/mistralai/client/models/function.py +++ b/src/mistralai/client/models/function.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 32275a9d8fee from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/functioncall.py b/src/mistralai/client/models/functioncall.py index 6cb6f26e..527c3ad4 100644 --- a/src/mistralai/client/models/functioncall.py +++ b/src/mistralai/client/models/functioncall.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 393fca552632 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/functioncallentry.py b/src/mistralai/client/models/functioncallentry.py index fce4d387..6ada1d35 100644 --- a/src/mistralai/client/models/functioncallentry.py +++ b/src/mistralai/client/models/functioncallentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cd058446c0aa from __future__ import annotations from .functioncallentryarguments import ( diff --git a/src/mistralai/client/models/functioncallentryarguments.py b/src/mistralai/client/models/functioncallentryarguments.py index ac9e6227..afe81b24 100644 --- a/src/mistralai/client/models/functioncallentryarguments.py +++ b/src/mistralai/client/models/functioncallentryarguments.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3df3767a7b93 from __future__ import annotations from typing import Any, Dict, Union diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py index 8146fa5c..5d871a0e 100644 --- a/src/mistralai/client/models/functioncallevent.py +++ b/src/mistralai/client/models/functioncallevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 23b120b8f122 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/functionname.py b/src/mistralai/client/models/functionname.py index 2a05c1de..07d98a0e 100644 --- a/src/mistralai/client/models/functionname.py +++ b/src/mistralai/client/models/functionname.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 000acafdb0c0 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/functionresultentry.py b/src/mistralai/client/models/functionresultentry.py index a843bf9b..ca73cbb7 100644 --- a/src/mistralai/client/models/functionresultentry.py +++ b/src/mistralai/client/models/functionresultentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 213df39bd5e6 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py index 16abcbf3..13b04496 100644 --- a/src/mistralai/client/models/functiontool.py +++ b/src/mistralai/client/models/functiontool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2e9ef5800117 from __future__ import annotations from .function import Function, FunctionTypedDict diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/getagentop.py similarity index 77% rename from src/mistralai/client/models/agents_api_v1_agents_getop.py rename to src/mistralai/client/models/getagentop.py index 57abff76..55d8fe68 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_getop.py +++ b/src/mistralai/client/models/getagentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5a28bb1e727e from __future__ import annotations from mistralai.client.types import ( @@ -14,28 +15,26 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentsAPIV1AgentsGetAgentVersionTypedDict = TypeAliasType( - "AgentsAPIV1AgentsGetAgentVersionTypedDict", Union[int, str] +GetAgentAgentVersionTypedDict = TypeAliasType( + "GetAgentAgentVersionTypedDict", Union[int, str] ) -AgentsAPIV1AgentsGetAgentVersion = TypeAliasType( - "AgentsAPIV1AgentsGetAgentVersion", Union[int, str] -) +GetAgentAgentVersion = TypeAliasType("GetAgentAgentVersion", Union[int, str]) -class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): +class GetAgentRequestTypedDict(TypedDict): agent_id: str - agent_version: NotRequired[Nullable[AgentsAPIV1AgentsGetAgentVersionTypedDict]] + agent_version: NotRequired[Nullable[GetAgentAgentVersionTypedDict]] -class AgentsAPIV1AgentsGetRequest(BaseModel): +class GetAgentRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] agent_version: Annotated[ - OptionalNullable[AgentsAPIV1AgentsGetAgentVersion], + OptionalNullable[GetAgentAgentVersion], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/client/models/getagentversionop.py similarity index 81% rename from src/mistralai/client/models/agents_api_v1_agents_get_versionop.py rename to src/mistralai/client/models/getagentversionop.py index edcccda1..77b8a266 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py +++ b/src/mistralai/client/models/getagentversionop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a0db5a6aab1f from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): +class GetAgentVersionRequestTypedDict(TypedDict): agent_id: str version: str -class AgentsAPIV1AgentsGetVersionRequest(BaseModel): +class GetAgentVersionRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/client/models/getbatchjobop.py similarity index 93% rename from src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py rename to src/mistralai/client/models/getbatchjobop.py index 32e34281..792c3e21 100644 --- a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py +++ b/src/mistralai/client/models/getbatchjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 443103fe3b88 from __future__ import annotations from mistralai.client.types import ( @@ -13,12 +14,12 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): +class GetBatchJobRequestTypedDict(TypedDict): job_id: str inline: NotRequired[Nullable[bool]] -class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): +class GetBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_historyop.py b/src/mistralai/client/models/getconversationhistoryop.py similarity index 80% rename from src/mistralai/client/models/agents_api_v1_conversations_historyop.py rename to src/mistralai/client/models/getconversationhistoryop.py index ba1f8890..c1fbf3de 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_historyop.py +++ b/src/mistralai/client/models/getconversationhistoryop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c863a4cbeb34 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): +class GetConversationHistoryRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching entries.""" -class AgentsAPIV1ConversationsHistoryRequest(BaseModel): +class GetConversationHistoryRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/client/models/getconversationmessagesop.py similarity index 80% rename from src/mistralai/client/models/agents_api_v1_conversations_messagesop.py rename to src/mistralai/client/models/getconversationmessagesop.py index e05728f2..6666198e 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py +++ b/src/mistralai/client/models/getconversationmessagesop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bb8a90ba7c22 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): +class GetConversationMessagesRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching messages.""" -class AgentsAPIV1ConversationsMessagesRequest(BaseModel): +class GetConversationMessagesRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_getop.py b/src/mistralai/client/models/getconversationop.py similarity index 90% rename from src/mistralai/client/models/agents_api_v1_conversations_getop.py rename to src/mistralai/client/models/getconversationop.py index 7308708e..d204d175 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_getop.py +++ b/src/mistralai/client/models/getconversationop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1a622b8337ac from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict @@ -9,12 +10,12 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): +class GetConversationRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching metadata.""" -class AgentsAPIV1ConversationsGetRequest(BaseModel): +class GetConversationRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/client/models/getdocumentextractedtextsignedurlop.py similarity index 77% rename from src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py rename to src/mistralai/client/models/getdocumentextractedtextsignedurlop.py index 24ed897d..9a71181d 100644 --- a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py +++ b/src/mistralai/client/models/getdocumentextractedtextsignedurlop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 69099395d631 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): +class GetDocumentExtractedTextSignedURLRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): +class GetDocumentExtractedTextSignedURLRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_delete_v1op.py b/src/mistralai/client/models/getdocumentop.py similarity index 82% rename from src/mistralai/client/models/libraries_documents_delete_v1op.py rename to src/mistralai/client/models/getdocumentop.py index bc5ec6e5..d7b07db7 100644 --- a/src/mistralai/client/models/libraries_documents_delete_v1op.py +++ b/src/mistralai/client/models/getdocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: de89ff93d373 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): +class GetDocumentRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsDeleteV1Request(BaseModel): +class GetDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_status_v1op.py b/src/mistralai/client/models/getdocumentsignedurlop.py similarity index 80% rename from src/mistralai/client/models/libraries_documents_get_status_v1op.py rename to src/mistralai/client/models/getdocumentsignedurlop.py index 92b077d3..e5d56c54 100644 --- a/src/mistralai/client/models/libraries_documents_get_status_v1op.py +++ b/src/mistralai/client/models/getdocumentsignedurlop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b8d95511c6d1 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): +class GetDocumentSignedURLRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetStatusV1Request(BaseModel): +class GetDocumentSignedURLRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/client/models/getdocumentstatusop.py similarity index 81% rename from src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py rename to src/mistralai/client/models/getdocumentstatusop.py index 350c8e73..4206f593 100644 --- a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py +++ b/src/mistralai/client/models/getdocumentstatusop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f1f40b8f003f from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): +class GetDocumentStatusRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetSignedURLV1Request(BaseModel): +class GetDocumentStatusRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/client/models/getdocumenttextcontentop.py similarity index 80% rename from src/mistralai/client/models/libraries_documents_get_text_content_v1op.py rename to src/mistralai/client/models/getdocumenttextcontentop.py index 68f9725a..8a7b4aae 100644 --- a/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py +++ b/src/mistralai/client/models/getdocumenttextcontentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ba23717093ef from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): +class GetDocumentTextContentRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetTextContentV1Request(BaseModel): +class GetDocumentTextContentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_get_signed_urlop.py b/src/mistralai/client/models/getfilesignedurlop.py similarity index 86% rename from src/mistralai/client/models/files_api_routes_get_signed_urlop.py rename to src/mistralai/client/models/getfilesignedurlop.py index a05f8262..06ed79ee 100644 --- a/src/mistralai/client/models/files_api_routes_get_signed_urlop.py +++ b/src/mistralai/client/models/getfilesignedurlop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1aa50b81c8cf from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,13 +8,13 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): +class GetFileSignedURLRequestTypedDict(TypedDict): file_id: str expiry: NotRequired[int] r"""Number of hours before the url becomes invalid. Defaults to 24h""" -class FilesAPIRoutesGetSignedURLRequest(BaseModel): +class GetFileSignedURLRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/client/models/getfinetuningjobop.py similarity index 74% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py rename to src/mistralai/client/models/getfinetuningjobop.py index 8837d262..1fb732f4 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/client/models/getfinetuningjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: afe997f96d69 from __future__ import annotations from .classifierdetailedjobout import ( @@ -16,26 +17,26 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): +class GetFineTuningJobRequestTypedDict(TypedDict): job_id: str r"""The ID of the job to analyse.""" -class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): +class GetFineTuningJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] r"""The ID of the job to analyse.""" -JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", +GetFineTuningJobResponseTypedDict = TypeAliasType( + "GetFineTuningJobResponseTypedDict", Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ +GetFineTuningJobResponse = Annotated[ Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], Field(discriminator="JOB_TYPE"), ] diff --git a/src/mistralai/client/models/libraries_delete_v1op.py b/src/mistralai/client/models/getlibraryop.py similarity index 78% rename from src/mistralai/client/models/libraries_delete_v1op.py rename to src/mistralai/client/models/getlibraryop.py index fa447de0..bc0b4a23 100644 --- a/src/mistralai/client/models/libraries_delete_v1op.py +++ b/src/mistralai/client/models/getlibraryop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c84a92e23a90 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDeleteV1RequestTypedDict(TypedDict): +class GetLibraryRequestTypedDict(TypedDict): library_id: str -class LibrariesDeleteV1Request(BaseModel): +class GetLibraryRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py index 4e4b4777..e55389c3 100644 --- a/src/mistralai/client/models/githubrepositoryin.py +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: eef26fbd2876 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/githubrepositoryout.py b/src/mistralai/client/models/githubrepositoryout.py index 1f738708..514df01c 100644 --- a/src/mistralai/client/models/githubrepositoryout.py +++ b/src/mistralai/client/models/githubrepositoryout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d2434a167623 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/httpvalidationerror.py b/src/mistralai/client/models/httpvalidationerror.py index 34d9b543..e7f0a35b 100644 --- a/src/mistralai/client/models/httpvalidationerror.py +++ b/src/mistralai/client/models/httpvalidationerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4099f568a6f8 from __future__ import annotations from .validationerror import ValidationError diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py index c5dbda3f..680c6ce2 100644 --- a/src/mistralai/client/models/imagegenerationtool.py +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e1532275faa0 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/imageurl.py b/src/mistralai/client/models/imageurl.py index 6e61d1ae..4ff13b1c 100644 --- a/src/mistralai/client/models/imageurl.py +++ b/src/mistralai/client/models/imageurl.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4bbf5881fbf from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py index 9968ed74..993185cc 100644 --- a/src/mistralai/client/models/imageurlchunk.py +++ b/src/mistralai/client/models/imageurlchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 746fde62f637 from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict diff --git a/src/mistralai/client/models/inputentries.py b/src/mistralai/client/models/inputentries.py index 8ae29837..dc989295 100644 --- a/src/mistralai/client/models/inputentries.py +++ b/src/mistralai/client/models/inputentries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 44727997dacb from __future__ import annotations from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py index 2b8b2f5f..cfcdeb3d 100644 --- a/src/mistralai/client/models/inputs.py +++ b/src/mistralai/client/models/inputs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 84a8007518c7 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/instructrequest.py b/src/mistralai/client/models/instructrequest.py index 73d482d8..e5f9cccf 100644 --- a/src/mistralai/client/models/instructrequest.py +++ b/src/mistralai/client/models/instructrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d3ad9f896c7 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/jobin.py b/src/mistralai/client/models/jobin.py index 23a431c9..b3cb8998 100644 --- a/src/mistralai/client/models/jobin.py +++ b/src/mistralai/client/models/jobin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f4d176123ccc from __future__ import annotations from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict @@ -54,7 +55,6 @@ class JobInTypedDict(TypedDict): model: str - r"""The name of the model to fine-tune.""" hyperparameters: HyperparametersTypedDict training_files: NotRequired[List[TrainingFileTypedDict]] validation_files: NotRequired[Nullable[List[str]]] @@ -73,7 +73,6 @@ class JobInTypedDict(TypedDict): class JobIn(BaseModel): model: str - r"""The name of the model to fine-tune.""" hyperparameters: Hyperparameters diff --git a/src/mistralai/client/models/jobmetadataout.py b/src/mistralai/client/models/jobmetadataout.py index f91e30c0..1d386539 100644 --- a/src/mistralai/client/models/jobmetadataout.py +++ b/src/mistralai/client/models/jobmetadataout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 805f41e3292a from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/jobsout.py b/src/mistralai/client/models/jobsout.py index 7727d56c..a4127a5d 100644 --- a/src/mistralai/client/models/jobsout.py +++ b/src/mistralai/client/models/jobsout.py @@ -1,10 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 22e91e9631a9 from __future__ import annotations from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic from pydantic import Field +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -19,13 +23,10 @@ ] -JobsOutObject = Literal["list",] - - class JobsOutTypedDict(TypedDict): total: int data: NotRequired[List[JobsOutDataTypedDict]] - object: NotRequired[JobsOutObject] + object: Literal["list"] class JobsOut(BaseModel): @@ -33,4 +34,7 @@ class JobsOut(BaseModel): data: Optional[List[JobsOutData]] = None - object: Optional[JobsOutObject] = "list" + OBJECT: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" diff --git a/src/mistralai/client/models/jsonschema.py b/src/mistralai/client/models/jsonschema.py index db2fa55b..948c94ed 100644 --- a/src/mistralai/client/models/jsonschema.py +++ b/src/mistralai/client/models/jsonschema.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e1fc1d8a434a from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/legacyjobmetadataout.py b/src/mistralai/client/models/legacyjobmetadataout.py index 155ecea7..4453c157 100644 --- a/src/mistralai/client/models/legacyjobmetadataout.py +++ b/src/mistralai/client/models/legacyjobmetadataout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4f44aa38c864 from __future__ import annotations from mistralai.client.types import ( @@ -8,12 +9,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -LegacyJobMetadataOutObject = Literal["job.metadata",] +from typing_extensions import Annotated, NotRequired, TypedDict class LegacyJobMetadataOutTypedDict(TypedDict): @@ -36,7 +37,7 @@ class LegacyJobMetadataOutTypedDict(TypedDict): r"""The number of complete passes through the entire training dataset.""" training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - object: NotRequired[LegacyJobMetadataOutObject] + object: Literal["job.metadata"] class LegacyJobMetadataOut(BaseModel): @@ -70,7 +71,13 @@ class LegacyJobMetadataOut(BaseModel): training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - object: Optional[LegacyJobMetadataOutObject] = "job.metadata" + OBJECT: Annotated[ + Annotated[ + Optional[Literal["job.metadata"]], + AfterValidator(validate_const("job.metadata")), + ], + pydantic.Field(alias="object"), + ] = "job.metadata" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/client/models/libraryin.py b/src/mistralai/client/models/libraryin.py index a7b36158..1a71d410 100644 --- a/src/mistralai/client/models/libraryin.py +++ b/src/mistralai/client/models/libraryin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6147d5df71d9 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/libraryinupdate.py b/src/mistralai/client/models/libraryinupdate.py index f0241ba1..328b2de3 100644 --- a/src/mistralai/client/models/libraryinupdate.py +++ b/src/mistralai/client/models/libraryinupdate.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 300a6bb02e6e from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/libraryout.py b/src/mistralai/client/models/libraryout.py index d1953f16..c7ab7b8d 100644 --- a/src/mistralai/client/models/libraryout.py +++ b/src/mistralai/client/models/libraryout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4e608c7aafc4 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/client/models/listagentaliasesop.py similarity index 75% rename from src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py rename to src/mistralai/client/models/listagentaliasesop.py index b9770fff..83c6d176 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py +++ b/src/mistralai/client/models/listagentaliasesop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ff038766a902 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): +class ListAgentAliasesRequestTypedDict(TypedDict): agent_id: str -class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): +class ListAgentAliasesRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_listop.py b/src/mistralai/client/models/listagentsop.py similarity index 82% rename from src/mistralai/client/models/agents_api_v1_agents_listop.py rename to src/mistralai/client/models/listagentsop.py index 119f5123..863fc13a 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_listop.py +++ b/src/mistralai/client/models/listagentsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a573a873c404 from __future__ import annotations from .requestsource import RequestSource @@ -15,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): +class ListAgentsRequestTypedDict(TypedDict): page: NotRequired[int] r"""Page number (0-indexed)""" page_size: NotRequired[int] @@ -23,11 +24,14 @@ class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): deployment_chat: NotRequired[Nullable[bool]] sources: NotRequired[Nullable[List[RequestSource]]] name: NotRequired[Nullable[str]] + r"""Filter by agent name""" + search: NotRequired[Nullable[str]] + r"""Search agents by name or ID""" id: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] -class AgentsAPIV1AgentsListRequest(BaseModel): +class ListAgentsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -54,6 +58,13 @@ class AgentsAPIV1AgentsListRequest(BaseModel): OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET + r"""Filter by agent name""" + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""Search agents by name or ID""" id: Annotated[ OptionalNullable[str], @@ -73,10 +84,18 @@ def serialize_model(self, handler): "deployment_chat", "sources", "name", + "search", + "id", + "metadata", + ] + nullable_fields = [ + "deployment_chat", + "sources", + "name", + "search", "id", "metadata", ] - nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"] null_default_fields = [] serialized = handler(self) diff --git 
a/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py b/src/mistralai/client/models/listagentversionsop.py similarity index 88% rename from src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py rename to src/mistralai/client/models/listagentversionsop.py index 813335f9..613d3d85 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py +++ b/src/mistralai/client/models/listagentversionsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ccc5fb48e78f from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,7 +8,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): +class ListAgentVersionsRequestTypedDict(TypedDict): agent_id: str page: NotRequired[int] r"""Page number (0-indexed)""" @@ -15,7 +16,7 @@ class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): r"""Number of versions per page""" -class AgentsAPIV1AgentsListVersionsRequest(BaseModel): +class ListAgentVersionsRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/client/models/listbatchjobsop.py similarity index 87% rename from src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py rename to src/mistralai/client/models/listbatchjobsop.py index 3557e773..5322df81 100644 --- a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py +++ b/src/mistralai/client/models/listbatchjobsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f49af453f5e6 from __future__ import annotations from .batchjobstatus import BatchJobStatus @@ -12,11 +13,17 @@ ) from mistralai.client.utils import FieldMetadata, QueryParamMetadata from pydantic import model_serializer -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict -class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): +OrderBy = Literal[ + "created", + "-created", +] + + +class ListBatchJobsRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] model: NotRequired[Nullable[str]] @@ -25,9 +32,10 @@ class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): created_after: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] status: NotRequired[Nullable[List[BatchJobStatus]]] + order_by: NotRequired[OrderBy] -class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): +class ListBatchJobsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -68,6 +76,11 @@ class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET + order_by: Annotated[ + Optional[OrderBy], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "-created" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -79,6 +92,7 @@ def serialize_model(self, handler): "created_after", "created_by_me", "status", + "order_by", ] nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] null_default_fields = [] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_listop.py b/src/mistralai/client/models/listconversationsop.py similarity index 85% rename from src/mistralai/client/models/agents_api_v1_conversations_listop.py rename to src/mistralai/client/models/listconversationsop.py index aae9c74e..1c9a347c 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_listop.py +++ b/src/mistralai/client/models/listconversationsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d6007f6c1643 from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict @@ -16,13 +17,13 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): +class ListConversationsRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] metadata: NotRequired[Nullable[Dict[str, Any]]] -class AgentsAPIV1ConversationsListRequest(BaseModel): +class ListConversationsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -69,12 +70,12 @@ def serialize_model(self, handler): return m -AgentsAPIV1ConversationsListResponseTypedDict = TypeAliasType( - "AgentsAPIV1ConversationsListResponseTypedDict", +ListConversationsResponseTypedDict = TypeAliasType( + "ListConversationsResponseTypedDict", Union[AgentConversationTypedDict, ModelConversationTypedDict], ) -AgentsAPIV1ConversationsListResponse = TypeAliasType( - "AgentsAPIV1ConversationsListResponse", Union[AgentConversation, ModelConversation] +ListConversationsResponse = TypeAliasType( + "ListConversationsResponse", Union[AgentConversation, ModelConversation] ) diff --git a/src/mistralai/client/models/listdocumentout.py b/src/mistralai/client/models/listdocumentout.py index 24969a0f..a636b3de 100644 --- a/src/mistralai/client/models/listdocumentout.py +++ b/src/mistralai/client/models/listdocumentout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b2c96075ce00 from __future__ import annotations from .documentout import DocumentOut, DocumentOutTypedDict diff --git a/src/mistralai/client/models/libraries_documents_list_v1op.py b/src/mistralai/client/models/listdocumentsop.py similarity index 95% rename from src/mistralai/client/models/libraries_documents_list_v1op.py rename to src/mistralai/client/models/listdocumentsop.py index 5dec3385..0f7c4584 100644 --- a/src/mistralai/client/models/libraries_documents_list_v1op.py +++ b/src/mistralai/client/models/listdocumentsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3e42bdc15383 from __future__ import annotations from mistralai.client.types import ( @@ -14,7 +15,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class LibrariesDocumentsListV1RequestTypedDict(TypedDict): +class ListDocumentsRequestTypedDict(TypedDict): library_id: str search: NotRequired[Nullable[str]] page_size: NotRequired[int] @@ -24,7 +25,7 @@ class LibrariesDocumentsListV1RequestTypedDict(TypedDict): sort_order: NotRequired[str] -class LibrariesDocumentsListV1Request(BaseModel): +class ListDocumentsRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_list_filesop.py b/src/mistralai/client/models/listfilesop.py similarity index 96% rename from src/mistralai/client/models/files_api_routes_list_filesop.py rename to src/mistralai/client/models/listfilesop.py index ace99631..a9af5c70 100644 --- a/src/mistralai/client/models/files_api_routes_list_filesop.py +++ b/src/mistralai/client/models/listfilesop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e5bd46ac0145 from __future__ import annotations from .filepurpose import FilePurpose @@ -17,7 +18,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): +class ListFilesRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] include_total: NotRequired[bool] @@ -28,7 +29,7 @@ class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): mimetypes: NotRequired[Nullable[List[str]]] -class FilesAPIRoutesListFilesRequest(BaseModel): +class ListFilesRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), diff --git a/src/mistralai/client/models/listfilesout.py b/src/mistralai/client/models/listfilesout.py index 1db17c40..460822f7 100644 --- a/src/mistralai/client/models/listfilesout.py +++ b/src/mistralai/client/models/listfilesout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ae5fa21b141c from __future__ import annotations from .fileschema import FileSchema, FileSchemaTypedDict diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/client/models/listfinetuningjobsop.py similarity index 93% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py rename to src/mistralai/client/models/listfinetuningjobsop.py index 8c19bacb..8712c3fa 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/client/models/listfinetuningjobsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b77fe203b929 from __future__ import annotations from datetime import datetime @@ -15,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -JobsAPIRoutesFineTuningGetFineTuningJobsStatus = Literal[ +ListFineTuningJobsStatus = Literal[ "QUEUED", "STARTED", "VALIDATING", @@ -30,7 +31,7 @@ r"""The current job state to filter on. When set, the other results are not displayed.""" -class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): +class ListFineTuningJobsRequestTypedDict(TypedDict): page: NotRequired[int] r"""The page number of the results to be returned.""" page_size: NotRequired[int] @@ -42,7 +43,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): created_before: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: NotRequired[Nullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus]] + status: NotRequired[Nullable[ListFineTuningJobsStatus]] r"""The current job state to filter on. When set, the other results are not displayed.""" wandb_project: NotRequired[Nullable[str]] r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" @@ -52,7 +53,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): r"""The model suffix to filter on. 
When set, the other results are not displayed.""" -class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): +class ListFineTuningJobsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -89,7 +90,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" status: Annotated[ - OptionalNullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus], + OptionalNullable[ListFineTuningJobsStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET r"""The current job state to filter on. When set, the other results are not displayed.""" diff --git a/src/mistralai/client/models/libraries_share_list_v1op.py b/src/mistralai/client/models/listlibraryaccessesop.py similarity index 76% rename from src/mistralai/client/models/libraries_share_list_v1op.py rename to src/mistralai/client/models/listlibraryaccessesop.py index fd5d9d33..2206310f 100644 --- a/src/mistralai/client/models/libraries_share_list_v1op.py +++ b/src/mistralai/client/models/listlibraryaccessesop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 581b332626b7 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class LibrariesShareListV1RequestTypedDict(TypedDict): +class ListLibraryAccessesRequestTypedDict(TypedDict): library_id: str -class LibrariesShareListV1Request(BaseModel): +class ListLibraryAccessesRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listlibraryout.py b/src/mistralai/client/models/listlibraryout.py index 24aaa1a9..39fa459f 100644 --- a/src/mistralai/client/models/listlibraryout.py +++ b/src/mistralai/client/models/listlibraryout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cb78c529e763 from __future__ import annotations from .libraryout import LibraryOut, LibraryOutTypedDict diff --git a/src/mistralai/client/models/listsharingout.py b/src/mistralai/client/models/listsharingout.py index f139813f..443ad0d6 100644 --- a/src/mistralai/client/models/listsharingout.py +++ b/src/mistralai/client/models/listsharingout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ee708a7ccdad from __future__ import annotations from .sharingout import SharingOut, SharingOutTypedDict diff --git a/src/mistralai/client/models/messageentries.py b/src/mistralai/client/models/messageentries.py index 9b1706de..a95098e0 100644 --- a/src/mistralai/client/models/messageentries.py +++ b/src/mistralai/client/models/messageentries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e13f9009902b from __future__ import annotations from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict diff --git a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py index e90d8aa0..63cf14e7 100644 --- a/src/mistralai/client/models/messageinputcontentchunks.py +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 01025c12866a from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py index a72319cf..15046d25 100644 --- a/src/mistralai/client/models/messageinputentry.py +++ b/src/mistralai/client/models/messageinputentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c0a4b5179095 from __future__ import annotations from .messageinputcontentchunks import ( diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py index 136a7608..def7a4d2 100644 --- a/src/mistralai/client/models/messageoutputcontentchunks.py +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2ed248515035 from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict diff --git a/src/mistralai/client/models/messageoutputentry.py b/src/mistralai/client/models/messageoutputentry.py index d52e4e3e..8752fc36 100644 --- a/src/mistralai/client/models/messageoutputentry.py +++ b/src/mistralai/client/models/messageoutputentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a07577d2268d from __future__ import annotations from .messageoutputcontentchunks import ( diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py index 447e3867..39c10139 100644 --- a/src/mistralai/client/models/messageoutputevent.py +++ b/src/mistralai/client/models/messageoutputevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a2bbf63615c6 from __future__ import annotations from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict diff --git a/src/mistralai/client/models/metricout.py b/src/mistralai/client/models/metricout.py index f8027a69..5705c712 100644 --- a/src/mistralai/client/models/metricout.py +++ b/src/mistralai/client/models/metricout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 92d33621dda7 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/mistralerror.py b/src/mistralai/client/models/mistralerror.py index 28cfd22d..862a6be8 100644 --- a/src/mistralai/client/models/mistralerror.py +++ b/src/mistralai/client/models/mistralerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 68ffd8394c2e import httpx from typing import Optional diff --git a/src/mistralai/client/models/mistralpromptmode.py b/src/mistralai/client/models/mistralpromptmode.py index 7008fc05..9b91323e 100644 --- a/src/mistralai/client/models/mistralpromptmode.py +++ b/src/mistralai/client/models/mistralpromptmode.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 95abc4ec799a from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/modelcapabilities.py b/src/mistralai/client/models/modelcapabilities.py index a6db80e7..c329efbc 100644 --- a/src/mistralai/client/models/modelcapabilities.py +++ b/src/mistralai/client/models/modelcapabilities.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 64d8a422ea29 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py index d348072a..c0bacb7f 100644 --- a/src/mistralai/client/models/modelconversation.py +++ b/src/mistralai/client/models/modelconversation.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fea0a651f888 from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py index b357ae84..c122122c 100644 --- a/src/mistralai/client/models/modellist.py +++ b/src/mistralai/client/models/modellist.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 00693c7eec60 from __future__ import annotations from .basemodelcard import BaseModelCard, BaseModelCardTypedDict diff --git a/src/mistralai/client/models/moderationobject.py b/src/mistralai/client/models/moderationobject.py index a6b44b96..9aa4eb15 100644 --- a/src/mistralai/client/models/moderationobject.py +++ b/src/mistralai/client/models/moderationobject.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 132faad0549a from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/moderationresponse.py b/src/mistralai/client/models/moderationresponse.py index 288c8d82..a8a8ec3d 100644 --- a/src/mistralai/client/models/moderationresponse.py +++ b/src/mistralai/client/models/moderationresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 06bab279cb31 from __future__ import annotations from .moderationobject import ModerationObject, ModerationObjectTypedDict diff --git a/src/mistralai/client/models/no_response_error.py b/src/mistralai/client/models/no_response_error.py index 1deab64b..7705f194 100644 --- a/src/mistralai/client/models/no_response_error.py +++ b/src/mistralai/client/models/no_response_error.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2849e0a482e2 from dataclasses import dataclass diff --git a/src/mistralai/client/models/ocrimageobject.py b/src/mistralai/client/models/ocrimageobject.py index e97fa8df..e95b67e1 100644 --- a/src/mistralai/client/models/ocrimageobject.py +++ b/src/mistralai/client/models/ocrimageobject.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 685faeb41a80 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/ocrpagedimensions.py b/src/mistralai/client/models/ocrpagedimensions.py index f4fc11e0..847205c6 100644 --- a/src/mistralai/client/models/ocrpagedimensions.py +++ b/src/mistralai/client/models/ocrpagedimensions.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 02f763afbc9f from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/ocrpageobject.py b/src/mistralai/client/models/ocrpageobject.py index f8b43601..4f4ccf43 100644 --- a/src/mistralai/client/models/ocrpageobject.py +++ b/src/mistralai/client/models/ocrpageobject.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 07a099f89487 from __future__ import annotations from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict diff --git a/src/mistralai/client/models/ocrrequest.py b/src/mistralai/client/models/ocrrequest.py index 03a6028c..18b899dd 100644 --- a/src/mistralai/client/models/ocrrequest.py +++ b/src/mistralai/client/models/ocrrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 36f204c64074 from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py index 2813a1ca..0a36e975 100644 --- a/src/mistralai/client/models/ocrresponse.py +++ b/src/mistralai/client/models/ocrresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2fdfc881ca56 from __future__ import annotations from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py index f3b0bc45..e32ad894 100644 --- a/src/mistralai/client/models/ocrtableobject.py +++ b/src/mistralai/client/models/ocrtableobject.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d74dd0d2ddac from __future__ import annotations from mistralai.client.types import BaseModel, UnrecognizedStr diff --git a/src/mistralai/client/models/ocrusageinfo.py b/src/mistralai/client/models/ocrusageinfo.py index 62f07fd4..a421d850 100644 --- a/src/mistralai/client/models/ocrusageinfo.py +++ b/src/mistralai/client/models/ocrusageinfo.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 272b7e1785d5 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py index ad0c087e..1a115fe8 100644 --- a/src/mistralai/client/models/outputcontentchunks.py +++ b/src/mistralai/client/models/outputcontentchunks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9ad9741f4975 from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict diff --git a/src/mistralai/client/models/paginationinfo.py b/src/mistralai/client/models/paginationinfo.py index 0252f448..2b9dab62 100644 --- a/src/mistralai/client/models/paginationinfo.py +++ b/src/mistralai/client/models/paginationinfo.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 48851e82d67e from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/prediction.py b/src/mistralai/client/models/prediction.py index f2c5d9c6..52f4adf1 100644 --- a/src/mistralai/client/models/prediction.py +++ b/src/mistralai/client/models/prediction.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1cc842a069a5 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/processingstatusout.py b/src/mistralai/client/models/processingstatusout.py index 031f386f..3acadcc9 100644 --- a/src/mistralai/client/models/processingstatusout.py +++ b/src/mistralai/client/models/processingstatusout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3df842c4140f from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/realtimetranscriptionerror.py b/src/mistralai/client/models/realtimetranscriptionerror.py index e6a889de..f8f2d3da 100644 --- a/src/mistralai/client/models/realtimetranscriptionerror.py +++ b/src/mistralai/client/models/realtimetranscriptionerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8c2267378f48 from __future__ import annotations from .realtimetranscriptionerrordetail import ( diff --git a/src/mistralai/client/models/realtimetranscriptionerrordetail.py b/src/mistralai/client/models/realtimetranscriptionerrordetail.py index e1f48379..cec1f6ea 100644 --- a/src/mistralai/client/models/realtimetranscriptionerrordetail.py +++ b/src/mistralai/client/models/realtimetranscriptionerrordetail.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5bd25cdf9c7a from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/realtimetranscriptionsession.py b/src/mistralai/client/models/realtimetranscriptionsession.py index 3a330651..d20d0d8c 100644 --- a/src/mistralai/client/models/realtimetranscriptionsession.py +++ b/src/mistralai/client/models/realtimetranscriptionsession.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 02517fa5411a from __future__ import annotations from .audioformat import AudioFormat, AudioFormatTypedDict diff --git a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py index cc6d5028..c4fa5774 100644 --- a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py +++ b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4e3731f63a3c from __future__ import annotations from .realtimetranscriptionsession import ( diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py index 3da23595..a61fb05e 100644 --- a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 686dc4f2450f from __future__ import annotations from .realtimetranscriptionsession import ( diff --git a/src/mistralai/client/models/referencechunk.py b/src/mistralai/client/models/referencechunk.py index 4c703b81..7634d8ae 100644 --- a/src/mistralai/client/models/referencechunk.py +++ b/src/mistralai/client/models/referencechunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 921acd3a224a from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/libraries_documents_get_v1op.py b/src/mistralai/client/models/reprocessdocumentop.py similarity index 81% rename from src/mistralai/client/models/libraries_documents_get_v1op.py rename to src/mistralai/client/models/reprocessdocumentop.py index a67e687e..48a4b72b 100644 --- a/src/mistralai/client/models/libraries_documents_get_v1op.py +++ b/src/mistralai/client/models/reprocessdocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b2913a7aa5c9 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): +class ReprocessDocumentRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetV1Request(BaseModel): +class ReprocessDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/requestsource.py b/src/mistralai/client/models/requestsource.py index 7b0a35c4..fc4433cb 100644 --- a/src/mistralai/client/models/requestsource.py +++ b/src/mistralai/client/models/requestsource.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3f2774d9e609 from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py index 283baa11..ed331ff1 100644 --- a/src/mistralai/client/models/responsedoneevent.py +++ b/src/mistralai/client/models/responsedoneevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cf8a686bf82c from __future__ import annotations from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict diff --git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py index ee078963..8f196a52 100644 --- a/src/mistralai/client/models/responseerrorevent.py +++ b/src/mistralai/client/models/responseerrorevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b286d74e8724 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/responseformat.py b/src/mistralai/client/models/responseformat.py index 5899b017..409b80d6 100644 --- a/src/mistralai/client/models/responseformat.py +++ b/src/mistralai/client/models/responseformat.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6ab8bc8d22c0 from __future__ import annotations from .jsonschema import JSONSchema, JSONSchemaTypedDict diff --git a/src/mistralai/client/models/responseformats.py b/src/mistralai/client/models/responseformats.py index b98cd098..21345778 100644 --- a/src/mistralai/client/models/responseformats.py +++ b/src/mistralai/client/models/responseformats.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c4462a05fb08 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py index 0841fd58..256d2a6c 100644 --- a/src/mistralai/client/models/responsestartedevent.py +++ b/src/mistralai/client/models/responsestartedevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 24f54ee8b0f2 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/responsevalidationerror.py b/src/mistralai/client/models/responsevalidationerror.py index bab5d0b7..1ed0d552 100644 --- a/src/mistralai/client/models/responsevalidationerror.py +++ b/src/mistralai/client/models/responsevalidationerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c244a88981e0 import httpx from typing import Optional diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restartop.py b/src/mistralai/client/models/restartconversationop.py similarity index 87% rename from src/mistralai/client/models/agents_api_v1_conversations_restartop.py rename to src/mistralai/client/models/restartconversationop.py index 8bce3ce5..b09eaed5 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_restartop.py +++ b/src/mistralai/client/models/restartconversationop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f6f3e4bbfd8 from __future__ import annotations from .conversationrestartrequest import ( @@ -10,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): +class RestartConversationRequestTypedDict(TypedDict): conversation_id: str r"""ID of the original conversation which is being restarted.""" conversation_restart_request: ConversationRestartRequestTypedDict -class AgentsAPIV1ConversationsRestartRequest(BaseModel): +class RestartConversationRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/client/models/restartconversationstreamop.py similarity index 87% rename from src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py rename to src/mistralai/client/models/restartconversationstreamop.py index 9b489ab4..3b2025f5 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py +++ b/src/mistralai/client/models/restartconversationstreamop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 16dc9ee5bf22 from __future__ import annotations from .conversationrestartstreamrequest import ( @@ -10,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): +class RestartConversationStreamRequestTypedDict(TypedDict): conversation_id: str r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict -class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): +class RestartConversationStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_retrieve_fileop.py b/src/mistralai/client/models/retrievefileop.py similarity index 77% rename from src/mistralai/client/models/files_api_routes_retrieve_fileop.py rename to src/mistralai/client/models/retrievefileop.py index 4a9678e5..edd50e57 100644 --- a/src/mistralai/client/models/files_api_routes_retrieve_fileop.py +++ b/src/mistralai/client/models/retrievefileop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ee73efdf9180 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): +class RetrieveFileRequestTypedDict(TypedDict): file_id: str -class FilesAPIRoutesRetrieveFileRequest(BaseModel): +class RetrieveFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/retrievefileout.py b/src/mistralai/client/models/retrievefileout.py index ffd0617a..2abf2161 100644 --- a/src/mistralai/client/models/retrievefileout.py +++ b/src/mistralai/client/models/retrievefileout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8bb5859aa0d0 from __future__ import annotations from .filepurpose import FilePurpose diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrievemodelop.py similarity index 89% rename from src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py rename to src/mistralai/client/models/retrievemodelop.py index 96e5b57f..b4334e9a 100644 --- a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/client/models/retrievemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d883baa79c9e from __future__ import annotations from .basemodelcard import BaseModelCard, BaseModelCardTypedDict @@ -10,12 +11,12 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): +class RetrieveModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to retrieve.""" -class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): +class RetrieveModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/sampletype.py b/src/mistralai/client/models/sampletype.py index e0727b02..dfec7cce 100644 --- a/src/mistralai/client/models/sampletype.py +++ b/src/mistralai/client/models/sampletype.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a9309422fed7 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/sdkerror.py b/src/mistralai/client/models/sdkerror.py index ceb03c48..101e1e6a 100644 --- a/src/mistralai/client/models/sdkerror.py +++ b/src/mistralai/client/models/sdkerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 12f991dad510 import httpx from typing import Optional diff --git a/src/mistralai/client/models/security.py b/src/mistralai/client/models/security.py index 1b67229b..4fa8b4b2 100644 --- a/src/mistralai/client/models/security.py +++ b/src/mistralai/client/models/security.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c2ca0e2a36b7 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/shareenum.py b/src/mistralai/client/models/shareenum.py index ca1b9624..08ffeb7e 100644 --- a/src/mistralai/client/models/shareenum.py +++ b/src/mistralai/client/models/shareenum.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a0e2a7a16bf8 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/sharingdelete.py b/src/mistralai/client/models/sharingdelete.py index d659342f..202732cf 100644 --- a/src/mistralai/client/models/sharingdelete.py +++ b/src/mistralai/client/models/sharingdelete.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f5ecce372e06 from __future__ import annotations from .entitytype import EntityType diff --git a/src/mistralai/client/models/sharingin.py b/src/mistralai/client/models/sharingin.py index 630f4c70..8cc3e896 100644 --- a/src/mistralai/client/models/sharingin.py +++ b/src/mistralai/client/models/sharingin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e953dda09c02 from __future__ import annotations from .entitytype import EntityType diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharingout.py index 195701d1..77807154 100644 --- a/src/mistralai/client/models/sharingout.py +++ b/src/mistralai/client/models/sharingout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0b8804effb5c from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/source.py b/src/mistralai/client/models/source.py index 181b327e..fcea403c 100644 --- a/src/mistralai/client/models/source.py +++ b/src/mistralai/client/models/source.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fcee60a4ea0d from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/ssetypes.py b/src/mistralai/client/models/ssetypes.py index ac2722f1..0add960b 100644 --- a/src/mistralai/client/models/ssetypes.py +++ b/src/mistralai/client/models/ssetypes.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1733e4765106 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/startfinetuningjobop.py similarity index 72% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py rename to src/mistralai/client/models/startfinetuningjobop.py index 91d581eb..805a8721 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/client/models/startfinetuningjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 663886392468 from __future__ import annotations from .classifierdetailedjobout import ( @@ -16,24 +17,24 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): +class StartFineTuningJobRequestTypedDict(TypedDict): job_id: str -class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): +class StartFineTuningJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] -JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", +StartFineTuningJobResponseTypedDict = TypeAliasType( + "StartFineTuningJobResponseTypedDict", Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ +StartFineTuningJobResponse = Annotated[ Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], Field(discriminator="JOB_TYPE"), ] diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py index 245e7b61..352eca76 100644 --- a/src/mistralai/client/models/systemmessage.py +++ b/src/mistralai/client/models/systemmessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 500ef6e85ba1 from __future__ import annotations from .systemmessagecontentchunks import ( diff --git a/src/mistralai/client/models/systemmessagecontentchunks.py b/src/mistralai/client/models/systemmessagecontentchunks.py index 7a797379..d480a219 100644 --- a/src/mistralai/client/models/systemmessagecontentchunks.py +++ b/src/mistralai/client/models/systemmessagecontentchunks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 297e8905d5af from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict diff --git a/src/mistralai/client/models/textchunk.py b/src/mistralai/client/models/textchunk.py index 4207ce7e..c0584234 100644 --- a/src/mistralai/client/models/textchunk.py +++ b/src/mistralai/client/models/textchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9c96fb86a9ab from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/thinkchunk.py b/src/mistralai/client/models/thinkchunk.py index b1560806..a999f5d7 100644 --- a/src/mistralai/client/models/thinkchunk.py +++ b/src/mistralai/client/models/thinkchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 294bfce193a4 from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict diff --git a/src/mistralai/client/models/timestampgranularity.py b/src/mistralai/client/models/timestampgranularity.py index 5bda890f..8d377375 100644 --- a/src/mistralai/client/models/timestampgranularity.py +++ b/src/mistralai/client/models/timestampgranularity.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 68ddf8d702ea from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/tool.py b/src/mistralai/client/models/tool.py index 4b29f575..a46d31f1 100644 --- a/src/mistralai/client/models/tool.py +++ b/src/mistralai/client/models/tool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 48b4f6f50fe9 from __future__ import annotations from .function import Function, FunctionTypedDict diff --git a/src/mistralai/client/models/toolcall.py b/src/mistralai/client/models/toolcall.py index 558b49bf..4a05bbd0 100644 --- a/src/mistralai/client/models/toolcall.py +++ b/src/mistralai/client/models/toolcall.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fb34a1a3f3c2 from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict diff --git a/src/mistralai/client/models/toolchoice.py b/src/mistralai/client/models/toolchoice.py index 2c7f6cbf..aa2016fb 100644 --- a/src/mistralai/client/models/toolchoice.py +++ b/src/mistralai/client/models/toolchoice.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 14f7e4cc35b6 from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict diff --git a/src/mistralai/client/models/toolchoiceenum.py b/src/mistralai/client/models/toolchoiceenum.py index ba8195b8..d66c3d07 100644 --- a/src/mistralai/client/models/toolchoiceenum.py +++ b/src/mistralai/client/models/toolchoiceenum.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c7798801f860 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py index aeda1472..384ec240 100644 --- a/src/mistralai/client/models/toolexecutiondeltaevent.py +++ b/src/mistralai/client/models/toolexecutiondeltaevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: df8f17cf3e07 from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py index 88aa5124..56f28899 100644 --- a/src/mistralai/client/models/toolexecutiondoneevent.py +++ b/src/mistralai/client/models/toolexecutiondoneevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 514fdee7d99f from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py index 530c9029..158cbf06 100644 --- a/src/mistralai/client/models/toolexecutionentry.py +++ b/src/mistralai/client/models/toolexecutionentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 76db69eebe41 from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py index 3d5f49c7..15918669 100644 --- a/src/mistralai/client/models/toolexecutionstartedevent.py +++ b/src/mistralai/client/models/toolexecutionstartedevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 40fadb8e49a1 from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolfilechunk.py b/src/mistralai/client/models/toolfilechunk.py index 62b5ffed..6eebd562 100644 --- a/src/mistralai/client/models/toolfilechunk.py +++ b/src/mistralai/client/models/toolfilechunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26c8aadf416a from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolmessage.py b/src/mistralai/client/models/toolmessage.py index 44fe63e7..b3e8ffd9 100644 --- a/src/mistralai/client/models/toolmessage.py +++ b/src/mistralai/client/models/toolmessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 15f1af161031 from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict diff --git a/src/mistralai/client/models/toolreferencechunk.py b/src/mistralai/client/models/toolreferencechunk.py index 882b1563..3c76c8c2 100644 --- a/src/mistralai/client/models/toolreferencechunk.py +++ b/src/mistralai/client/models/toolreferencechunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 822e9f3e70de from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/tooltypes.py b/src/mistralai/client/models/tooltypes.py index abb26c25..e601c196 100644 --- a/src/mistralai/client/models/tooltypes.py +++ b/src/mistralai/client/models/tooltypes.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 86c3b54272fd from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/trainingfile.py b/src/mistralai/client/models/trainingfile.py index 1d9763e0..1f710ff8 100644 --- a/src/mistralai/client/models/trainingfile.py +++ b/src/mistralai/client/models/trainingfile.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2edf9bce227d from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/transcriptionresponse.py b/src/mistralai/client/models/transcriptionresponse.py index 24c0b92e..786863ec 100644 --- a/src/mistralai/client/models/transcriptionresponse.py +++ b/src/mistralai/client/models/transcriptionresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 60896dbc6345 from __future__ import annotations from .transcriptionsegmentchunk import ( diff --git a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py index 25e859e5..c78bec30 100644 --- a/src/mistralai/client/models/transcriptionsegmentchunk.py +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d1e6f3bdc74b from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py index 9ba2aeff..b5740b3b 100644 --- a/src/mistralai/client/models/transcriptionstreamdone.py +++ b/src/mistralai/client/models/transcriptionstreamdone.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 066a9158ed09 from __future__ import annotations from .transcriptionsegmentchunk import ( diff --git a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py index 63a08fb5..17161a17 100644 --- a/src/mistralai/client/models/transcriptionstreamevents.py +++ b/src/mistralai/client/models/transcriptionstreamevents.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b50b3d74f16f from __future__ import annotations from .transcriptionstreamdone import ( diff --git a/src/mistralai/client/models/transcriptionstreameventtypes.py b/src/mistralai/client/models/transcriptionstreameventtypes.py index cb6b2889..c74bbb74 100644 --- a/src/mistralai/client/models/transcriptionstreameventtypes.py +++ b/src/mistralai/client/models/transcriptionstreameventtypes.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6f71f6fbf4c5 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/transcriptionstreamlanguage.py b/src/mistralai/client/models/transcriptionstreamlanguage.py index 244103be..67b3e979 100644 --- a/src/mistralai/client/models/transcriptionstreamlanguage.py +++ b/src/mistralai/client/models/transcriptionstreamlanguage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e94333e4bc27 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py index ee014742..8db5e736 100644 --- a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c0a882ce57e5 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py b/src/mistralai/client/models/transcriptionstreamtextdelta.py index feb459ea..49338a08 100644 --- a/src/mistralai/client/models/transcriptionstreamtextdelta.py +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6086dc081147 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/unarchiveftmodelout.py b/src/mistralai/client/models/unarchiveftmodelout.py index 511c390b..0249a69e 100644 --- a/src/mistralai/client/models/unarchiveftmodelout.py +++ b/src/mistralai/client/models/unarchiveftmodelout.py @@ -1,23 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9dbc3bfb71ed from __future__ import annotations from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -UnarchiveFTModelOutObject = Literal["model",] +from typing_extensions import Annotated, NotRequired, TypedDict class UnarchiveFTModelOutTypedDict(TypedDict): id: str - object: NotRequired[UnarchiveFTModelOutObject] + object: Literal["model"] archived: NotRequired[bool] class UnarchiveFTModelOut(BaseModel): id: str - object: Optional[UnarchiveFTModelOutObject] = "model" + OBJECT: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" archived: Optional[bool] = False diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/client/models/unarchivemodelop.py similarity index 76% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py rename to src/mistralai/client/models/unarchivemodelop.py index e1be0ac0..1d68a06a 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ b/src/mistralai/client/models/unarchivemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: eb18584fd78c from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): +class UnarchiveModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to unarchive.""" -class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): +class UnarchiveModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_updateop.py b/src/mistralai/client/models/updateagentop.py similarity index 86% rename from src/mistralai/client/models/agents_api_v1_agents_updateop.py rename to src/mistralai/client/models/updateagentop.py index 116acaa7..28acc83d 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_updateop.py +++ b/src/mistralai/client/models/updateagentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ae3a6abea468 from __future__ import annotations from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict @@ -7,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): +class UpdateAgentRequestTypedDict(TypedDict): agent_id: str agent_update_request: AgentUpdateRequestTypedDict -class AgentsAPIV1AgentsUpdateRequest(BaseModel): +class UpdateAgentRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/client/models/updateagentversionop.py similarity index 81% rename from src/mistralai/client/models/agents_api_v1_agents_update_versionop.py rename to src/mistralai/client/models/updateagentversionop.py index 116f952b..114013bc 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py +++ b/src/mistralai/client/models/updateagentversionop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3821dca5b20a from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): +class UpdateAgentVersionRequestTypedDict(TypedDict): agent_id: str version: int -class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): +class UpdateAgentVersionRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_update_v1op.py b/src/mistralai/client/models/updatedocumentop.py similarity index 87% rename from src/mistralai/client/models/libraries_documents_update_v1op.py rename to src/mistralai/client/models/updatedocumentop.py index f677b4dd..073f22a9 100644 --- a/src/mistralai/client/models/libraries_documents_update_v1op.py +++ b/src/mistralai/client/models/updatedocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: eee9ef317180 from __future__ import annotations from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict @@ -7,13 +8,13 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): +class UpdateDocumentRequestTypedDict(TypedDict): library_id: str document_id: str document_update_in: DocumentUpdateInTypedDict -class LibrariesDocumentsUpdateV1Request(BaseModel): +class UpdateDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updateftmodelin.py b/src/mistralai/client/models/updateftmodelin.py index 0471a154..4ac5a8a2 100644 --- a/src/mistralai/client/models/updateftmodelin.py +++ b/src/mistralai/client/models/updateftmodelin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 39e2d678e651 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/libraries_update_v1op.py b/src/mistralai/client/models/updatelibraryop.py similarity index 85% rename from src/mistralai/client/models/libraries_update_v1op.py rename to src/mistralai/client/models/updatelibraryop.py index c434ab7a..c5a1ad30 100644 --- a/src/mistralai/client/models/libraries_update_v1op.py +++ b/src/mistralai/client/models/updatelibraryop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4ba7acdb62c6 from __future__ import annotations from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict @@ -7,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesUpdateV1RequestTypedDict(TypedDict): +class UpdateLibraryRequestTypedDict(TypedDict): library_id: str library_in_update: LibraryInUpdateTypedDict -class LibrariesUpdateV1Request(BaseModel): +class UpdateLibraryRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/updatemodelop.py similarity index 77% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py rename to src/mistralai/client/models/updatemodelop.py index 760c22f4..023be979 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/client/models/updatemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ba149ecfe03e from __future__ import annotations from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict @@ -11,13 +12,13 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): +class UpdateModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to update.""" update_ft_model_in: UpdateFTModelInTypedDict -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): +class UpdateModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -29,14 +30,14 @@ class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): ] -JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", +UpdateModelResponseTypedDict = TypeAliasType( + "UpdateModelResponseTypedDict", Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ +UpdateModelResponse = Annotated[ Union[ClassifierFTModelOut, CompletionFTModelOut], Field(discriminator="MODEL_TYPE") ] r"""OK""" diff --git a/src/mistralai/client/models/libraries_share_create_v1op.py b/src/mistralai/client/models/updateorcreatelibraryaccessop.py similarity index 81% rename from src/mistralai/client/models/libraries_share_create_v1op.py rename to src/mistralai/client/models/updateorcreatelibraryaccessop.py index d0313bd0..1abe6eda 100644 --- a/src/mistralai/client/models/libraries_share_create_v1op.py +++ b/src/mistralai/client/models/updateorcreatelibraryaccessop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ec9b15418f5c from __future__ import annotations from .sharingin import SharingIn, SharingInTypedDict @@ -7,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesShareCreateV1RequestTypedDict(TypedDict): +class UpdateOrCreateLibraryAccessRequestTypedDict(TypedDict): library_id: str sharing_in: SharingInTypedDict -class LibrariesShareCreateV1Request(BaseModel): +class UpdateOrCreateLibraryAccessRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_upload_v1op.py b/src/mistralai/client/models/uploaddocumentop.py similarity index 92% rename from src/mistralai/client/models/libraries_documents_upload_v1op.py rename to src/mistralai/client/models/uploaddocumentop.py index 18a5b780..2c957947 100644 --- a/src/mistralai/client/models/libraries_documents_upload_v1op.py +++ b/src/mistralai/client/models/uploaddocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0018fe7ff48c from __future__ import annotations from .file import File, FileTypedDict @@ -40,12 +41,12 @@ class DocumentUpload(BaseModel): """ -class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): +class UploadDocumentRequestTypedDict(TypedDict): library_id: str request_body: DocumentUploadTypedDict -class LibrariesDocumentsUploadV1Request(BaseModel): +class UploadDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/uploadfileop.py similarity index 97% rename from src/mistralai/client/models/files_api_routes_upload_fileop.py rename to src/mistralai/client/models/uploadfileop.py index ab2f1524..50848f0b 100644 --- a/src/mistralai/client/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/client/models/uploadfileop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d67619670938 from __future__ import annotations from .file import File, FileTypedDict diff --git a/src/mistralai/client/models/uploadfileout.py b/src/mistralai/client/models/uploadfileout.py index 55e56504..be291efb 100644 --- a/src/mistralai/client/models/uploadfileout.py +++ b/src/mistralai/client/models/uploadfileout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 42466f2bebfb from __future__ import annotations from .filepurpose import FilePurpose diff --git a/src/mistralai/client/models/usageinfo.py b/src/mistralai/client/models/usageinfo.py index f1186d97..e78f92e7 100644 --- a/src/mistralai/client/models/usageinfo.py +++ b/src/mistralai/client/models/usageinfo.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 54adb9a3af16 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/usermessage.py b/src/mistralai/client/models/usermessage.py index fe64a8cc..25ccdf80 100644 --- a/src/mistralai/client/models/usermessage.py +++ b/src/mistralai/client/models/usermessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cb583483acf4 from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict diff --git a/src/mistralai/client/models/validationerror.py b/src/mistralai/client/models/validationerror.py index 352409be..385714c8 100644 --- a/src/mistralai/client/models/validationerror.py +++ b/src/mistralai/client/models/validationerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 15df3c7368ab from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py index 18e32ac3..c5db4a6d 100644 --- a/src/mistralai/client/models/wandbintegration.py +++ b/src/mistralai/client/models/wandbintegration.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4823c1e80942 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/wandbintegrationout.py b/src/mistralai/client/models/wandbintegrationout.py index 6409f4a4..d0a09bf4 100644 --- a/src/mistralai/client/models/wandbintegrationout.py +++ b/src/mistralai/client/models/wandbintegrationout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6b103d74195c from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py index c7825ec3..9588ab1d 100644 --- a/src/mistralai/client/models/websearchpremiumtool.py +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bfe88af887e3 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py index 7a237d86..27502909 100644 --- a/src/mistralai/client/models/websearchtool.py +++ b/src/mistralai/client/models/websearchtool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26b0903423e5 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py index 00708197..05b33ac7 100644 --- a/src/mistralai/client/models_.py +++ b/src/mistralai/client/models_.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1d277958a843 from .basesdk import BaseSDK from mistralai.client import models, utils @@ -67,7 +68,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="list_models_v1_models_get", + operation_id="ListModels", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -144,7 +145,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="list_models_v1_models_get", + operation_id="ListModels", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -195,7 +196,7 @@ def retrieve( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveModelV1ModelsModelIDGetRequest( + request = models.RetrieveModelRequest( model_id=model_id, ) @@ -228,7 +229,7 @@ def retrieve( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="retrieve_model_v1_models__model_id__get", + operation_id="RetrieveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -287,7 +288,7 @@ async def retrieve_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveModelV1ModelsModelIDGetRequest( + request = models.RetrieveModelRequest( model_id=model_id, ) @@ -320,7 +321,7 @@ async def retrieve_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="retrieve_model_v1_models__model_id__get", + operation_id="RetrieveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -379,7 +380,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteModelV1ModelsModelIDDeleteRequest( + request = models.DeleteModelRequest( model_id=model_id, ) @@ -412,7 +413,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="delete_model_v1_models__model_id__delete", + operation_id="DeleteModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -469,7 +470,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteModelV1ModelsModelIDDeleteRequest( + request = models.DeleteModelRequest( model_id=model_id, ) @@ -502,7 +503,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="delete_model_v1_models__model_id__delete", + operation_id="DeleteModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -540,7 +541,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + ) -> models.UpdateModelResponse: r"""Update Fine Tuned Model Update a model name or description. 
@@ -563,7 +564,7 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + request = models.UpdateModelRequest( model_id=model_id, update_ft_model_in=models.UpdateFTModelIn( name=name, @@ -603,7 +604,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + operation_id="UpdateModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -615,9 +616,7 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res - ) + return unmarshal_json_response(models.UpdateModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -637,7 +636,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + ) -> models.UpdateModelResponse: r"""Update Fine Tuned Model Update a model name or description. @@ -660,7 +659,7 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + request = models.UpdateModelRequest( model_id=model_id, update_ft_model_in=models.UpdateFTModelIn( name=name, @@ -700,7 +699,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + operation_id="UpdateModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -712,9 +711,7 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res - ) + return unmarshal_json_response(models.UpdateModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -753,7 +750,7 @@ def archive( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + request = models.ArchiveModelRequest( model_id=model_id, ) @@ -786,7 +783,7 @@ def archive( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + operation_id="ArchiveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -837,7 +834,7 @@ async def archive_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + request = models.ArchiveModelRequest( model_id=model_id, ) @@ -870,7 +867,7 @@ async def archive_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + operation_id="ArchiveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -921,7 
+918,7 @@ def unarchive( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + request = models.UnarchiveModelRequest( model_id=model_id, ) @@ -954,7 +951,7 @@ def unarchive( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + operation_id="UnarchiveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1005,7 +1002,7 @@ async def unarchive_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + request = models.UnarchiveModelRequest( model_id=model_id, ) @@ -1038,7 +1035,7 @@ async def unarchive_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + operation_id="UnarchiveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/ocr.py b/src/mistralai/client/ocr.py index ce7e2126..2aa38229 100644 --- a/src/mistralai/client/ocr.py +++ b/src/mistralai/client/ocr.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f804a12fc62 from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py index 99579400..b1ab5493 100644 --- a/src/mistralai/client/sdk.py +++ b/src/mistralai/client/sdk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 48edbcb38d7e from .basesdk import BaseSDK from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients @@ -30,7 +31,10 @@ class Mistral(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + r"""Mistral AI API: Dora OpenAPI schema + + Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + """ models: "Models" r"""Model Management API""" diff --git a/src/mistralai/client/sdkconfiguration.py b/src/mistralai/client/sdkconfiguration.py index df50d16f..712e92e0 100644 --- a/src/mistralai/client/sdkconfiguration.py +++ b/src/mistralai/client/sdkconfiguration.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b7dd68a0235e from ._version import ( __gen_version__, diff --git a/src/mistralai/client/transcriptions.py b/src/mistralai/client/transcriptions.py index 45501024..f7ef5b0a 100644 --- a/src/mistralai/client/transcriptions.py +++ b/src/mistralai/client/transcriptions.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 75b45780c978 from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/types/__init__.py b/src/mistralai/client/types/__init__.py index fc76fe0c..cf838643 100644 --- a/src/mistralai/client/types/__init__.py +++ b/src/mistralai/client/types/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 000b943f821c from .basemodel import ( BaseModel, diff --git a/src/mistralai/client/types/basemodel.py b/src/mistralai/client/types/basemodel.py index a9a640a1..4e889aa0 100644 --- a/src/mistralai/client/types/basemodel.py +++ b/src/mistralai/client/types/basemodel.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7ec465a1d3ff from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel diff --git a/src/mistralai/client/utils/__init__.py b/src/mistralai/client/utils/__init__.py index f9c2edce..7ed3a420 100644 --- a/src/mistralai/client/utils/__init__.py +++ b/src/mistralai/client/utils/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b69505f4b269 from typing import TYPE_CHECKING from importlib import import_module diff --git a/src/mistralai/client/utils/annotations.py b/src/mistralai/client/utils/annotations.py index 12e0aa4f..4b60ab8e 100644 --- a/src/mistralai/client/utils/annotations.py +++ b/src/mistralai/client/utils/annotations.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1ffdedfc66a2 from enum import Enum from typing import Any, Optional diff --git a/src/mistralai/client/utils/datetimes.py b/src/mistralai/client/utils/datetimes.py index a6c52cd6..a2c94fac 100644 --- a/src/mistralai/client/utils/datetimes.py +++ b/src/mistralai/client/utils/datetimes.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c40066d868c9 from datetime import datetime import sys diff --git a/src/mistralai/client/utils/enums.py b/src/mistralai/client/utils/enums.py index 3324e1bc..d897495f 100644 --- a/src/mistralai/client/utils/enums.py +++ b/src/mistralai/client/utils/enums.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a0735873b5ac import enum import sys diff --git a/src/mistralai/client/utils/eventstreaming.py b/src/mistralai/client/utils/eventstreaming.py index 0969899b..3fe3c7e1 100644 --- a/src/mistralai/client/utils/eventstreaming.py +++ b/src/mistralai/client/utils/eventstreaming.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3263d7502030 import re import json diff --git a/src/mistralai/client/utils/forms.py b/src/mistralai/client/utils/forms.py index f961e76b..2b474b9a 100644 --- a/src/mistralai/client/utils/forms.py +++ b/src/mistralai/client/utils/forms.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 58842e905fce from typing import ( Any, diff --git a/src/mistralai/client/utils/headers.py b/src/mistralai/client/utils/headers.py index 37864cbb..64911872 100644 --- a/src/mistralai/client/utils/headers.py +++ b/src/mistralai/client/utils/headers.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9066de2ead8b from typing import ( Any, diff --git a/src/mistralai/client/utils/logger.py b/src/mistralai/client/utils/logger.py index 2ef27ee5..3edad830 100644 --- a/src/mistralai/client/utils/logger.py +++ b/src/mistralai/client/utils/logger.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 745023607a1f import httpx import logging diff --git a/src/mistralai/client/utils/metadata.py b/src/mistralai/client/utils/metadata.py index 173b3e5c..d46ffa59 100644 --- a/src/mistralai/client/utils/metadata.py +++ b/src/mistralai/client/utils/metadata.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d49d535ae52c from typing import Optional, Type, TypeVar, Union from dataclasses import dataclass diff --git a/src/mistralai/client/utils/queryparams.py b/src/mistralai/client/utils/queryparams.py index c04e0db8..0b78c548 100644 --- a/src/mistralai/client/utils/queryparams.py +++ b/src/mistralai/client/utils/queryparams.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bb77d4664844 from typing import ( Any, diff --git a/src/mistralai/client/utils/requestbodies.py b/src/mistralai/client/utils/requestbodies.py index 1de32b6d..3aae69c7 100644 --- a/src/mistralai/client/utils/requestbodies.py +++ b/src/mistralai/client/utils/requestbodies.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 946cfcd26ee4 import io from dataclasses import dataclass diff --git a/src/mistralai/client/utils/retries.py b/src/mistralai/client/utils/retries.py index 88a91b10..90c008b0 100644 --- a/src/mistralai/client/utils/retries.py +++ b/src/mistralai/client/utils/retries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5f1a5b90423c import asyncio import random diff --git a/src/mistralai/client/utils/security.py b/src/mistralai/client/utils/security.py index 3b8526bf..4c73806d 100644 --- a/src/mistralai/client/utils/security.py +++ b/src/mistralai/client/utils/security.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1acb7c006265 import base64 diff --git a/src/mistralai/client/utils/serializers.py b/src/mistralai/client/utils/serializers.py index 14321eb4..fbc2772d 100644 --- a/src/mistralai/client/utils/serializers.py +++ b/src/mistralai/client/utils/serializers.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 53c57c7f29a8 from decimal import Decimal import functools diff --git a/src/mistralai/client/utils/unmarshal_json_response.py b/src/mistralai/client/utils/unmarshal_json_response.py index 6d43d6e4..65190e5c 100644 --- a/src/mistralai/client/utils/unmarshal_json_response.py +++ b/src/mistralai/client/utils/unmarshal_json_response.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b13585fc5626 from typing import Any, Optional, Type, TypeVar, overload diff --git a/src/mistralai/client/utils/url.py b/src/mistralai/client/utils/url.py index c78ccbae..27a6a3a0 100644 --- a/src/mistralai/client/utils/url.py +++ b/src/mistralai/client/utils/url.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3c6496c17510 from decimal import Decimal from typing import ( diff --git a/src/mistralai/client/utils/values.py b/src/mistralai/client/utils/values.py index dae01a44..2469a9f3 100644 --- a/src/mistralai/client/utils/values.py +++ b/src/mistralai/client/utils/values.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bb6ade7a7f82 from datetime import datetime from enum import Enum diff --git a/uv.lock b/uv.lock index caa731ed..1e667c77 100644 --- a/uv.lock +++ b/uv.lock @@ -625,7 +625,7 @@ requires-dist = [ { name = "opentelemetry-api", specifier = ">=1.33.1,<2.0.0" }, { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.37.0,<2.0.0" }, { name = "opentelemetry-sdk", specifier = ">=1.33.1,<2.0.0" }, - { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.60" }, + { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.61" }, { name = "pydantic", specifier = ">=2.10.3" }, { name = "python-dateutil", specifier = ">=2.8.2" }, { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, From 8490078bb9387caadf2d6384ee6f4724d7c1f14a Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Mon, 23 Feb 2026 10:14:27 +0100 Subject: [PATCH 26/42] docs: add callout directing v1 users to v1 branch (#368) Users installing from PyPI get v1 but main branch docs are for v2, causing confusion with import paths. Add prominent notice at top of README directing them to the v1 branch for correct documentation. Fixes #366 --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 53de43f5..5c8a1b51 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,10 @@ # Mistral Python Client +> [!IMPORTANT] +> **Looking for v1 documentation?** If you installed `mistralai` from PyPI (e.g., `pip install mistralai`), you are using **v1** of the SDK. The documentation on this branch (`main`) is for **v2**, which is not yet released on PyPI. +> +> **➡️ [Go to the v1 branch for v1 documentation](https://github.com/mistralai/client-python/tree/v1)** + ## Migration warning This documentation is for Mistral AI SDK v2. 
You can find more details on how to migrate from v1 to v2 [here](MIGRATION.md) From 969c992f1decf803bcc6e61007ab37c4d596ca35 Mon Sep 17 00:00:00 2001 From: Nelson PROIA <144663685+Nelson-PROIA@users.noreply.github.com> Date: Tue, 24 Feb 2026 16:56:30 +0100 Subject: [PATCH 27/42] feat!: align Azure/GCP SDKs with namespace vision (API-1988) (#351) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Migrate Azure/GCP provider SDKs to PEP 420 namespace sub-packages: - mistralai_azure → mistralai.azure.client.MistralAzure - mistralai_gcp → mistralai.gcp.client.MistralGCP Azure: auto-inject api_version query param, warn on custom client conflict GCP: auto-detect credentials via google.auth.default(), refresh tokens only when expired, dynamically construct Vertex AI paths from model name Added 348 parity tests, async FIM integration tests, lint coverage for provider packages. --- .genignore | 2 +- .pre-commit-config.yaml | 6 +- .speakeasy/workflow.lock | 26 +- .speakeasy/workflow.yaml | 4 +- MIGRATION.md | 20 + README.md | 98 +-- examples/azure/.env.example | 4 + examples/azure/az_chat_no_streaming.py | 15 - examples/azure/chat_no_streaming.py | 19 +- examples/gcp/.env.example | 3 + examples/gcp/async_chat_no_streaming.py | 36 +- examples/gcp/gcp_async_chat_no_streaming.py | 24 - packages/azure/.genignore | 6 + .../{mistralai_azure => azure}/.gitattributes | 0 .../{mistralai_azure => azure}/.gitignore | 0 .../.speakeasy/gen.lock | 711 +++++++++--------- .../.speakeasy/gen.yaml | 21 +- .../CONTRIBUTING.md | 0 packages/{mistralai_azure => azure}/README.md | 282 ++++--- .../{mistralai_azure => azure}/RELEASES.md | 0 packages/azure/USAGE.md | 70 ++ .../docs/models/arguments.md | 0 .../docs/models/assistantmessage.md | 4 +- .../docs/models/assistantmessagecontent.md | 0 .../docs/models/chatcompletionchoice.md | 0 .../chatcompletionchoicefinishreason.md | 0 .../docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionrequestmessage.md} | 2 +- .../docs/models/chatcompletionrequeststop.md | 0 .../models/chatcompletionrequesttoolchoice.md | 0 .../docs/models/chatcompletionresponse.md | 0 .../models/chatcompletionstreamrequest.md | 4 +- .../chatcompletionstreamrequestmessage.md} | 2 +- .../chatcompletionstreamrequeststop.md} | 2 +- .../chatcompletionstreamrequesttoolchoice.md | 0 .../docs/models/completionchunk.md | 0 .../docs/models/completionevent.md | 0 .../models/completionresponsestreamchoice.md | 10 + ...letionresponsestreamchoicefinishreason.md} | 2 +- .../docs/models/contentchunk.md | 0 packages/azure/docs/models/deltamessage.md | 10 + .../docs/models/deltamessagecontent.md} | 2 +- .../docs/models/document.md | 0 .../azure/docs/models/documenturlchunk.md | 10 + .../docs/models/filechunk.md | 0 .../docs/models/format_.md | 0 .../docs/models/function.md | 0 .../docs/models/functioncall.md | 0 .../docs/models/functionname.md | 0 .../docs/models/httpvalidationerror.md | 0 packages/azure/docs/models/imagedetail.md | 10 + packages/azure/docs/models/imageurl.md | 9 + packages/azure/docs/models/imageurlchunk.md | 11 + .../docs/models/imageurlunion.md} | 2 +- .../docs/models/jsonschema.md | 0 .../docs/models/loc.md | 0 .../docs/models/mistralpromptmode.md | 0 .../docs/models/ocrimageobject.md | 0 .../docs/models/ocrpagedimensions.md | 0 .../docs/models/ocrpageobject.md | 0 .../docs/models/ocrrequest.md | 0 .../docs/models/ocrresponse.md | 0 .../docs/models/ocrtableobject.md | 0 .../docs/models/ocrusageinfo.md | 0 .../docs/models/prediction.md | 0 
packages/azure/docs/models/referencechunk.md | 9 + .../docs/models/responseformat.md | 0 .../docs/models/responseformats.md | 0 .../docs/models/security.md | 0 .../docs/models/systemmessage.md | 4 +- .../docs/models/systemmessagecontent.md | 0 .../docs/models/systemmessagecontentchunks.md | 0 .../docs/models/tableformat.md | 0 packages/azure/docs/models/textchunk.md | 9 + .../docs/models/thinkchunk.md | 4 +- .../docs/models/thinking.md | 0 .../docs/models/tool.md | 0 .../docs/models/toolcall.md | 0 .../docs/models/toolchoice.md | 0 .../docs/models/toolchoiceenum.md | 0 .../docs/models/toolmessage.md | 4 +- .../docs/models/toolmessagecontent.md | 0 .../docs/models/tooltypes.md | 0 .../docs/models/usageinfo.md | 0 .../docs/models/usermessage.md | 4 +- .../docs/models/usermessagecontent.md | 0 .../docs/models/utils/retryconfig.md | 0 .../docs/models/validationerror.md | 0 .../docs/sdks/chat/README.md | 42 +- .../docs/sdks/mistralazure/README.md | 0 packages/{mistralai_azure => azure}/py.typed | 0 packages/{mistralai_azure => azure}/pylintrc | 0 .../{mistralai_azure => azure}/pyproject.toml | 15 +- .../scripts/prepare_readme.py | 2 +- .../scripts/publish.sh | 0 .../src/mistralai/azure/client}/__init__.py | 0 .../azure/client}/_hooks/__init__.py | 0 .../azure/client}/_hooks/registration.py | 9 +- .../azure/client}/_hooks/sdkhooks.py | 2 +- .../mistralai/azure/client}/_hooks/types.py | 4 +- .../src/mistralai/azure/client}/_version.py | 6 +- .../src/mistralai/azure/client}/basesdk.py | 10 +- .../src/mistralai/azure/client}/chat.py | 54 +- .../src/mistralai/azure/client}/httpclient.py | 0 .../azure/client}/models/__init__.py | 114 +-- .../azure/client}/models/assistantmessage.py | 23 +- .../client}/models/chatcompletionchoice.py | 2 +- .../client}/models/chatcompletionrequest.py | 14 +- .../client}/models/chatcompletionresponse.py | 2 +- .../models/chatcompletionstreamrequest.py | 26 +- .../azure/client}/models/completionchunk.py | 2 +- .../azure/client}/models/completionevent.py | 2 +- .../models/completionresponsestreamchoice.py | 13 +- .../azure/client}/models/contentchunk.py | 2 +- .../azure/client}/models/deltamessage.py | 14 +- .../azure/client}/models/documenturlchunk.py | 24 +- .../azure/client}/models/filechunk.py | 4 +- .../azure/client}/models/function.py | 2 +- .../azure/client}/models/functioncall.py | 2 +- .../azure/client}/models/functionname.py | 2 +- .../client}/models/httpvalidationerror.py | 4 +- .../azure/client/models/imagedetail.py | 15 + .../azure/client}/models/imageurl.py | 7 +- .../azure/client/models/imageurlchunk.py | 38 + .../azure/client}/models/jsonschema.py | 2 +- .../azure/client}/models/mistralazureerror.py | 0 .../azure/client}/models/mistralpromptmode.py | 2 +- .../azure/client}/models/no_response_error.py | 0 .../azure/client}/models/ocrimageobject.py | 2 +- .../azure/client}/models/ocrpagedimensions.py | 2 +- .../azure/client}/models/ocrpageobject.py | 2 +- .../azure/client}/models/ocrrequest.py | 2 +- .../azure/client}/models/ocrresponse.py | 2 +- .../azure/client}/models/ocrtableobject.py | 13 +- .../azure/client}/models/ocrusageinfo.py | 2 +- .../azure/client}/models/prediction.py | 4 +- .../azure/client/models/referencechunk.py | 25 + .../azure/client}/models/responseformat.py | 2 +- .../azure/client}/models/responseformats.py | 0 .../client}/models/responsevalidationerror.py | 2 +- .../azure/client}/models/sdkerror.py | 2 +- .../azure/client}/models/security.py | 4 +- .../azure/client}/models/systemmessage.py | 19 +- 
.../models/systemmessagecontentchunks.py | 6 +- .../azure/client/models/textchunk.py | 23 + .../azure/client}/models/thinkchunk.py | 19 +- .../mistralai/azure/client}/models/tool.py | 2 +- .../azure/client}/models/toolcall.py | 2 +- .../azure/client}/models/toolchoice.py | 2 +- .../azure/client}/models/toolchoiceenum.py | 0 .../azure/client}/models/toolmessage.py | 23 +- .../azure/client}/models/tooltypes.py | 2 +- .../azure/client}/models/usageinfo.py | 2 +- .../azure/client}/models/usermessage.py | 21 +- .../azure/client}/models/validationerror.py | 2 +- .../src/mistralai/azure/client}/ocr.py | 8 +- .../src/mistralai/azure/client}/py.typed | 0 .../src/mistralai/azure/client}/sdk.py | 108 ++- .../azure/client}/sdkconfiguration.py | 4 +- .../mistralai/azure/client}/types/__init__.py | 0 .../azure/client}/types/basemodel.py | 0 .../mistralai/azure/client}/utils/__init__.py | 0 .../azure/client}/utils/annotations.py | 0 .../azure/client}/utils/datetimes.py | 0 .../mistralai/azure/client}/utils/enums.py | 0 .../azure/client}/utils/eventstreaming.py | 0 .../mistralai/azure/client}/utils/forms.py | 0 .../mistralai/azure/client}/utils/headers.py | 0 .../mistralai/azure/client}/utils/logger.py | 0 .../mistralai/azure/client}/utils/metadata.py | 0 .../azure/client}/utils/queryparams.py | 0 .../azure/client}/utils/requestbodies.py | 0 .../mistralai/azure/client}/utils/retries.py | 0 .../mistralai/azure/client}/utils/security.py | 0 .../azure/client}/utils/serializers.py | 0 .../client}/utils/unmarshal_json_response.py | 2 +- .../src/mistralai/azure/client}/utils/url.py | 0 .../mistralai/azure/client}/utils/values.py | 0 packages/{mistralai_azure => azure}/uv.lock | 2 +- packages/gcp/.genignore | 6 + .../{mistralai_gcp => gcp}/.gitattributes | 0 packages/{mistralai_gcp => gcp}/.gitignore | 0 .../.speakeasy/gen.lock | 659 ++++++++-------- .../.speakeasy/gen.yaml | 24 +- .../{mistralai_gcp => gcp}/CONTRIBUTING.md | 0 packages/{mistralai_gcp => gcp}/README.md | 185 ++--- packages/{mistralai_gcp => gcp}/RELEASES.md | 0 packages/gcp/USAGE.md | 61 ++ .../docs/models/arguments.md | 0 .../docs/models/assistantmessage.md | 4 +- .../docs/models/assistantmessagecontent.md | 0 .../docs/models/chatcompletionchoice.md | 0 .../chatcompletionchoicefinishreason.md | 0 .../docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionrequestmessage.md} | 2 +- .../docs/models/chatcompletionrequeststop.md | 0 .../models/chatcompletionrequesttoolchoice.md | 0 .../docs/models/chatcompletionresponse.md | 0 .../models/chatcompletionstreamrequest.md | 4 +- .../chatcompletionstreamrequestmessage.md} | 2 +- .../chatcompletionstreamrequeststop.md} | 2 +- .../chatcompletionstreamrequesttoolchoice.md | 0 .../docs/models/completionchunk.md | 0 .../docs/models/completionevent.md | 0 .../models/completionresponsestreamchoice.md | 10 + ...letionresponsestreamchoicefinishreason.md} | 2 +- .../docs/models/contentchunk.md | 0 packages/gcp/docs/models/deltamessage.md | 10 + .../docs/models/deltamessagecontent.md} | 2 +- .../docs/models/fimcompletionrequest.md | 0 .../docs/models/fimcompletionrequeststop.md | 0 .../docs/models/fimcompletionresponse.md | 0 .../docs/models/fimcompletionstreamrequest.md | 0 .../models/fimcompletionstreamrequeststop.md | 0 .../docs/models/function.md | 0 .../docs/models/functioncall.md | 0 .../docs/models/functionname.md | 0 .../docs/models/httpvalidationerror.md | 0 packages/gcp/docs/models/imagedetail.md | 10 + packages/gcp/docs/models/imageurl.md | 9 + packages/gcp/docs/models/imageurlchunk.md | 11 
+ .../docs/models/imageurlunion.md} | 2 +- .../docs/models/jsonschema.md | 0 .../{mistralai_gcp => gcp}/docs/models/loc.md | 0 .../docs/models/mistralpromptmode.md | 0 .../docs/models/prediction.md | 0 packages/gcp/docs/models/referencechunk.md | 9 + .../docs/models/responseformat.md | 0 .../docs/models/responseformats.md | 0 .../docs/models/security.md | 0 .../docs/models/systemmessage.md | 4 +- .../docs/models/systemmessagecontent.md | 0 .../docs/models/systemmessagecontentchunks.md | 0 packages/gcp/docs/models/textchunk.md | 9 + .../docs/models/thinkchunk.md | 4 +- .../docs/models/thinking.md | 0 .../docs/models/tool.md | 0 .../docs/models/toolcall.md | 0 .../docs/models/toolchoice.md | 0 .../docs/models/toolchoiceenum.md | 0 .../docs/models/toolmessage.md | 4 +- .../docs/models/toolmessagecontent.md | 0 .../docs/models/tooltypes.md | 0 .../docs/models/usageinfo.md | 0 .../docs/models/usermessage.md | 4 +- .../docs/models/usermessagecontent.md | 0 .../docs/models/utils/retryconfig.md | 0 .../docs/models/validationerror.md | 0 .../docs/sdks/chat/README.md | 32 +- .../docs/sdks/fim/README.md | 28 +- .../docs/sdks/mistralgcp/README.md | 0 packages/{mistralai_gcp => gcp}/py.typed | 0 packages/{mistralai_gcp => gcp}/pylintrc | 0 .../{mistralai_gcp => gcp}/pyproject.toml | 19 +- .../scripts/prepare_readme.py | 2 +- .../{mistralai_gcp => gcp}/scripts/publish.sh | 0 .../src/mistralai/gcp/client}/__init__.py | 0 .../mistralai/gcp/client}/_hooks/__init__.py | 0 .../gcp/client/_hooks/registration.py | 67 ++ .../mistralai/gcp/client}/_hooks/sdkhooks.py | 2 +- .../src/mistralai/gcp/client}/_hooks/types.py | 4 +- .../src/mistralai/gcp/client}/_version.py | 4 +- .../src/mistralai/gcp/client}/basesdk.py | 10 +- .../src/mistralai/gcp/client}/chat.py | 54 +- .../src/mistralai/gcp/client}/fim.py | 10 +- .../src/mistralai/gcp/client}/httpclient.py | 0 .../mistralai/gcp/client}/models/__init__.py | 110 +-- .../gcp/client}/models/assistantmessage.py | 23 +- .../client}/models/chatcompletionchoice.py | 2 +- .../client}/models/chatcompletionrequest.py | 14 +- .../client}/models/chatcompletionresponse.py | 2 +- .../models/chatcompletionstreamrequest.py | 26 +- .../gcp/client}/models/completionchunk.py | 2 +- .../gcp/client}/models/completionevent.py | 2 +- .../models/completionresponsestreamchoice.py | 13 +- .../gcp/client}/models/contentchunk.py | 2 +- .../gcp/client}/models/deltamessage.py | 14 +- .../client}/models/fimcompletionrequest.py | 2 +- .../client}/models/fimcompletionresponse.py | 2 +- .../models/fimcompletionstreamrequest.py | 2 +- .../mistralai/gcp/client}/models/function.py | 2 +- .../gcp/client}/models/functioncall.py | 2 +- .../gcp/client}/models/functionname.py | 2 +- .../gcp/client}/models/httpvalidationerror.py | 6 +- .../gcp/client/models/imagedetail.py | 15 + .../mistralai/gcp/client}/models/imageurl.py | 7 +- .../gcp/client/models/imageurlchunk.py | 36 + .../gcp/client}/models/jsonschema.py | 2 +- .../gcp/client}/models/mistralgcperror.py | 2 +- .../gcp/client}/models/mistralpromptmode.py | 2 +- .../gcp/client}/models/no_response_error.py | 0 .../gcp/client}/models/prediction.py | 4 +- .../gcp/client/models/referencechunk.py | 25 + .../gcp/client}/models/responseformat.py | 2 +- .../gcp/client}/models/responseformats.py | 0 .../client}/models/responsevalidationerror.py | 4 +- .../mistralai/gcp/client}/models/sdkerror.py | 4 +- .../mistralai/gcp/client}/models/security.py | 4 +- .../gcp/client}/models/systemmessage.py | 19 +- .../models/systemmessagecontentchunks.py | 6 +- 
.../mistralai/gcp/client/models/textchunk.py | 23 + .../gcp/client}/models/thinkchunk.py | 19 +- .../src/mistralai/gcp/client}/models/tool.py | 2 +- .../mistralai/gcp/client}/models/toolcall.py | 2 +- .../gcp/client}/models/toolchoice.py | 2 +- .../gcp/client}/models/toolchoiceenum.py | 0 .../gcp/client}/models/toolmessage.py | 23 +- .../mistralai/gcp/client}/models/tooltypes.py | 2 +- .../mistralai/gcp/client}/models/usageinfo.py | 2 +- .../gcp/client}/models/usermessage.py | 21 +- .../gcp/client}/models/validationerror.py | 2 +- .../src/mistralai/gcp/client}/py.typed | 0 packages/gcp/src/mistralai/gcp/client/sdk.py | 243 ++++++ .../mistralai/gcp/client}/sdkconfiguration.py | 4 +- .../mistralai/gcp/client}/types/__init__.py | 0 .../mistralai/gcp/client}/types/basemodel.py | 0 .../mistralai/gcp/client}/utils/__init__.py | 0 .../gcp/client}/utils/annotations.py | 0 .../mistralai/gcp/client}/utils/datetimes.py | 0 .../src/mistralai/gcp/client}/utils/enums.py | 0 .../gcp/client}/utils/eventstreaming.py | 0 .../src/mistralai/gcp/client}/utils/forms.py | 0 .../mistralai/gcp/client}/utils/headers.py | 0 .../src/mistralai/gcp/client}/utils/logger.py | 0 .../mistralai/gcp/client}/utils/metadata.py | 0 .../gcp/client}/utils/queryparams.py | 0 .../gcp/client}/utils/requestbodies.py | 0 .../mistralai/gcp/client}/utils/retries.py | 0 .../mistralai/gcp/client}/utils/security.py | 0 .../gcp/client}/utils/serializers.py | 0 .../client}/utils/unmarshal_json_response.py | 2 +- .../src/mistralai/gcp/client}/utils/url.py | 0 .../src/mistralai/gcp/client}/utils/values.py | 0 packages/{mistralai_gcp => gcp}/uv.lock | 62 +- packages/mistralai_azure/.genignore | 5 - .../mistralai_azure/.vscode/settings.json | 6 - packages/mistralai_azure/USAGE.md | 55 -- .../docs/models/assistantmessagerole.md | 8 - .../models/completionresponsestreamchoice.md | 10 - .../docs/models/deltamessage.md | 10 - .../docs/models/documenturlchunk.md | 10 - .../docs/models/documenturlchunktype.md | 8 - .../mistralai_azure/docs/models/imageurl.md | 9 - .../docs/models/imageurlchunk.md | 11 - .../docs/models/imageurlchunktype.md | 8 - .../docs/models/referencechunk.md | 9 - .../docs/models/referencechunktype.md | 8 - packages/mistralai_azure/docs/models/role.md | 8 - .../mistralai_azure/docs/models/textchunk.md | 9 - .../docs/models/thinkchunktype.md | 8 - .../docs/models/toolmessagerole.md | 8 - packages/mistralai_azure/docs/models/type.md | 8 - .../docs/models/usermessagerole.md | 8 - .../_hooks/custom_user_agent.py | 22 - .../mistralai_azure/_hooks/registration.py | 15 - .../mistralai_azure/models/imageurlchunk.py | 33 - .../mistralai_azure/models/referencechunk.py | 20 - .../src/mistralai_azure/models/textchunk.py | 20 - packages/mistralai_gcp/.genignore | 5 - packages/mistralai_gcp/.vscode/settings.json | 6 - packages/mistralai_gcp/USAGE.md | 51 -- .../docs/models/assistantmessagerole.md | 8 - .../models/completionresponsestreamchoice.md | 10 - .../mistralai_gcp/docs/models/deltamessage.md | 10 - .../mistralai_gcp/docs/models/imageurl.md | 9 - .../docs/models/imageurlchunk.md | 11 - .../docs/models/imageurlchunktype.md | 8 - .../docs/models/referencechunk.md | 9 - .../docs/models/referencechunktype.md | 8 - packages/mistralai_gcp/docs/models/role.md | 8 - .../mistralai_gcp/docs/models/textchunk.md | 9 - .../docs/models/thinkchunktype.md | 8 - .../docs/models/toolmessagerole.md | 8 - packages/mistralai_gcp/docs/models/type.md | 8 - .../docs/models/usermessagerole.md | 8 - .../mistralai_gcp/_hooks/custom_user_agent.py | 22 - 
.../src/mistralai_gcp/models/imageurlchunk.py | 33 - .../mistralai_gcp/models/referencechunk.py | 20 - .../src/mistralai_gcp/models/textchunk.py | 20 - .../mistralai_gcp/src/mistralai_gcp/sdk.py | 233 ------ pylintrc | 2 +- pyproject.toml | 30 +- scripts/lint_custom_code.sh | 42 +- scripts/run_examples.sh | 29 +- tasks.py | 4 +- tests/test_azure_integration.py | 433 +++++++++++ tests/test_azure_v2_parity.py | 269 +++++++ tests/test_gcp_integration.py | 512 +++++++++++++ tests/test_gcp_v2_parity.py | 330 ++++++++ uv.lock | 4 +- 390 files changed, 4238 insertions(+), 2521 deletions(-) create mode 100644 examples/azure/.env.example delete mode 100644 examples/azure/az_chat_no_streaming.py create mode 100644 examples/gcp/.env.example delete mode 100755 examples/gcp/gcp_async_chat_no_streaming.py create mode 100644 packages/azure/.genignore rename packages/{mistralai_azure => azure}/.gitattributes (100%) rename packages/{mistralai_azure => azure}/.gitignore (100%) rename packages/{mistralai_azure => azure}/.speakeasy/gen.lock (69%) rename packages/{mistralai_azure => azure}/.speakeasy/gen.yaml (78%) rename packages/{mistralai_azure => azure}/CONTRIBUTING.md (100%) rename packages/{mistralai_azure => azure}/README.md (57%) rename packages/{mistralai_azure => azure}/RELEASES.md (100%) create mode 100644 packages/azure/USAGE.md rename packages/{mistralai_azure => azure}/docs/models/arguments.md (100%) rename packages/{mistralai_azure => azure}/docs/models/assistantmessage.md (95%) rename packages/{mistralai_azure => azure}/docs/models/assistantmessagecontent.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionchoice.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionchoicefinishreason.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionrequest.md (99%) rename packages/{mistralai_azure/docs/models/messages.md => azure/docs/models/chatcompletionrequestmessage.md} (92%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionrequeststop.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionrequesttoolchoice.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionresponse.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionstreamrequest.md (99%) rename packages/{mistralai_gcp/docs/models/chatcompletionrequestmessages.md => azure/docs/models/chatcompletionstreamrequestmessage.md} (91%) rename packages/{mistralai_gcp/docs/models/stop.md => azure/docs/models/chatcompletionstreamrequeststop.md} (88%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionstreamrequesttoolchoice.md (100%) rename packages/{mistralai_azure => azure}/docs/models/completionchunk.md (100%) rename packages/{mistralai_azure => azure}/docs/models/completionevent.md (100%) create mode 100644 packages/azure/docs/models/completionresponsestreamchoice.md rename packages/{mistralai_gcp/docs/models/finishreason.md => azure/docs/models/completionresponsestreamchoicefinishreason.md} (81%) rename packages/{mistralai_azure => azure}/docs/models/contentchunk.md (100%) create mode 100644 packages/azure/docs/models/deltamessage.md rename packages/{mistralai_gcp/docs/models/content.md => azure/docs/models/deltamessagecontent.md} (89%) rename packages/{mistralai_azure => azure}/docs/models/document.md (100%) create mode 100644 packages/azure/docs/models/documenturlchunk.md rename packages/{mistralai_azure => azure}/docs/models/filechunk.md (100%) rename 
packages/{mistralai_azure => azure}/docs/models/format_.md (100%) rename packages/{mistralai_azure => azure}/docs/models/function.md (100%) rename packages/{mistralai_azure => azure}/docs/models/functioncall.md (100%) rename packages/{mistralai_azure => azure}/docs/models/functionname.md (100%) rename packages/{mistralai_azure => azure}/docs/models/httpvalidationerror.md (100%) create mode 100644 packages/azure/docs/models/imagedetail.md create mode 100644 packages/azure/docs/models/imageurl.md create mode 100644 packages/azure/docs/models/imageurlchunk.md rename packages/{mistralai_gcp/docs/models/imageurlchunkimageurl.md => azure/docs/models/imageurlunion.md} (86%) rename packages/{mistralai_azure => azure}/docs/models/jsonschema.md (100%) rename packages/{mistralai_azure => azure}/docs/models/loc.md (100%) rename packages/{mistralai_azure => azure}/docs/models/mistralpromptmode.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrimageobject.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrpagedimensions.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrpageobject.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrrequest.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrresponse.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrtableobject.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrusageinfo.md (100%) rename packages/{mistralai_azure => azure}/docs/models/prediction.md (100%) create mode 100644 packages/azure/docs/models/referencechunk.md rename packages/{mistralai_azure => azure}/docs/models/responseformat.md (100%) rename packages/{mistralai_azure => azure}/docs/models/responseformats.md (100%) rename packages/{mistralai_azure => azure}/docs/models/security.md (100%) rename packages/{mistralai_gcp => azure}/docs/models/systemmessage.md (88%) rename packages/{mistralai_azure => azure}/docs/models/systemmessagecontent.md (100%) rename packages/{mistralai_azure => azure}/docs/models/systemmessagecontentchunks.md (100%) rename packages/{mistralai_azure => azure}/docs/models/tableformat.md (100%) create mode 100644 packages/azure/docs/models/textchunk.md rename packages/{mistralai_azure => azure}/docs/models/thinkchunk.md (91%) rename packages/{mistralai_azure => azure}/docs/models/thinking.md (100%) rename packages/{mistralai_azure => azure}/docs/models/tool.md (100%) rename packages/{mistralai_azure => azure}/docs/models/toolcall.md (100%) rename packages/{mistralai_azure => azure}/docs/models/toolchoice.md (100%) rename packages/{mistralai_azure => azure}/docs/models/toolchoiceenum.md (100%) rename packages/{mistralai_gcp => azure}/docs/models/toolmessage.md (92%) rename packages/{mistralai_azure => azure}/docs/models/toolmessagecontent.md (100%) rename packages/{mistralai_azure => azure}/docs/models/tooltypes.md (100%) rename packages/{mistralai_azure => azure}/docs/models/usageinfo.md (100%) rename packages/{mistralai_gcp => azure}/docs/models/usermessage.md (89%) rename packages/{mistralai_azure => azure}/docs/models/usermessagecontent.md (100%) rename packages/{mistralai_azure => azure}/docs/models/utils/retryconfig.md (100%) rename packages/{mistralai_azure => azure}/docs/models/validationerror.md (100%) rename packages/{mistralai_azure => azure}/docs/sdks/chat/README.md (95%) rename packages/{mistralai_azure => azure}/docs/sdks/mistralazure/README.md (100%) rename packages/{mistralai_azure => azure}/py.typed (100%) rename packages/{mistralai_azure => 
azure}/pylintrc (100%) rename packages/{mistralai_azure => azure}/pyproject.toml (79%) rename packages/{mistralai_gcp => azure}/scripts/prepare_readme.py (96%) rename packages/{mistralai_azure => azure}/scripts/publish.sh (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/__init__.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/_hooks/__init__.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/_hooks/registration.py (70%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/_hooks/sdkhooks.py (97%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/_hooks/types.py (95%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/_version.py (69%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/basesdk.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/chat.py (96%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/httpclient.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/__init__.py (85%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/assistantmessage.py (80%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/chatcompletionchoice.py (91%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/chatcompletionrequest.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/chatcompletionresponse.py (92%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/chatcompletionstreamrequest.py (94%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/completionchunk.py (94%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/completionevent.py (87%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/completionresponsestreamchoice.py (82%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/contentchunk.py (93%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/deltamessage.py (81%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/documenturlchunk.py (72%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/filechunk.py (83%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/function.py (90%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/functioncall.py (91%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/functionname.py (89%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/httpvalidationerror.py (87%) create mode 100644 packages/azure/src/mistralai/azure/client/models/imagedetail.py rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/imageurl.py (87%) create mode 100644 
packages/azure/src/mistralai/azure/client/models/imageurlchunk.py rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/jsonschema.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/mistralazureerror.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/mistralpromptmode.py (89%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/no_response_error.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrimageobject.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrpagedimensions.py (91%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrpageobject.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrrequest.py (99%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrresponse.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrtableobject.py (78%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrusageinfo.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/prediction.py (89%) create mode 100644 packages/azure/src/mistralai/azure/client/models/referencechunk.py rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/responseformat.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/responseformats.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/responsevalidationerror.py (92%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/sdkerror.py (95%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/security.py (80%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/systemmessage.py (57%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/systemmessagecontentchunks.py (66%) create mode 100644 packages/azure/src/mistralai/azure/client/models/textchunk.py rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/thinkchunk.py (65%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/tool.py (89%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/toolcall.py (92%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/toolchoice.py (93%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/toolchoiceenum.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/toolmessage.py (77%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/tooltypes.py (77%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/usageinfo.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => 
azure/src/mistralai/azure/client}/models/usermessage.py (73%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/validationerror.py (90%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/ocr.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/py.typed (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/sdk.py (59%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/sdkconfiguration.py (93%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/types/__init__.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/types/basemodel.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/__init__.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/annotations.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/datetimes.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/enums.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/eventstreaming.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/forms.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/headers.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/logger.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/metadata.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/queryparams.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/requestbodies.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/retries.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/security.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/serializers.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/unmarshal_json_response.py (95%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/url.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/values.py (100%) rename packages/{mistralai_azure => azure}/uv.lock (99%) create mode 100644 packages/gcp/.genignore rename packages/{mistralai_gcp => gcp}/.gitattributes (100%) rename packages/{mistralai_gcp => gcp}/.gitignore (100%) rename packages/{mistralai_gcp => gcp}/.speakeasy/gen.lock (55%) rename packages/{mistralai_gcp => gcp}/.speakeasy/gen.yaml (78%) rename packages/{mistralai_gcp => gcp}/CONTRIBUTING.md (100%) rename packages/{mistralai_gcp => gcp}/README.md (70%) rename packages/{mistralai_gcp => gcp}/RELEASES.md (100%) create mode 100644 packages/gcp/USAGE.md rename packages/{mistralai_gcp => gcp}/docs/models/arguments.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/assistantmessage.md (95%) rename packages/{mistralai_gcp 
=> gcp}/docs/models/assistantmessagecontent.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionchoice.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionchoicefinishreason.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionrequest.md (99%) rename packages/{mistralai_gcp/docs/models/messages.md => gcp/docs/models/chatcompletionrequestmessage.md} (92%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionrequeststop.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionrequesttoolchoice.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionresponse.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionstreamrequest.md (99%) rename packages/{mistralai_azure/docs/models/chatcompletionrequestmessages.md => gcp/docs/models/chatcompletionstreamrequestmessage.md} (91%) rename packages/{mistralai_azure/docs/models/stop.md => gcp/docs/models/chatcompletionstreamrequeststop.md} (88%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionstreamrequesttoolchoice.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/completionchunk.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/completionevent.md (100%) create mode 100644 packages/gcp/docs/models/completionresponsestreamchoice.md rename packages/{mistralai_azure/docs/models/finishreason.md => gcp/docs/models/completionresponsestreamchoicefinishreason.md} (81%) rename packages/{mistralai_gcp => gcp}/docs/models/contentchunk.md (100%) create mode 100644 packages/gcp/docs/models/deltamessage.md rename packages/{mistralai_azure/docs/models/content.md => gcp/docs/models/deltamessagecontent.md} (89%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionrequest.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionrequeststop.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionresponse.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionstreamrequest.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionstreamrequeststop.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/function.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/functioncall.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/functionname.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/httpvalidationerror.md (100%) create mode 100644 packages/gcp/docs/models/imagedetail.md create mode 100644 packages/gcp/docs/models/imageurl.md create mode 100644 packages/gcp/docs/models/imageurlchunk.md rename packages/{mistralai_azure/docs/models/imageurlchunkimageurl.md => gcp/docs/models/imageurlunion.md} (86%) rename packages/{mistralai_gcp => gcp}/docs/models/jsonschema.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/loc.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/mistralpromptmode.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/prediction.md (100%) create mode 100644 packages/gcp/docs/models/referencechunk.md rename packages/{mistralai_gcp => gcp}/docs/models/responseformat.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/responseformats.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/security.md (100%) rename packages/{mistralai_azure => gcp}/docs/models/systemmessage.md (88%) rename packages/{mistralai_gcp => gcp}/docs/models/systemmessagecontent.md (100%) rename packages/{mistralai_gcp => 
gcp}/docs/models/systemmessagecontentchunks.md (100%) create mode 100644 packages/gcp/docs/models/textchunk.md rename packages/{mistralai_gcp => gcp}/docs/models/thinkchunk.md (91%) rename packages/{mistralai_gcp => gcp}/docs/models/thinking.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/tool.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/toolcall.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/toolchoice.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/toolchoiceenum.md (100%) rename packages/{mistralai_azure => gcp}/docs/models/toolmessage.md (92%) rename packages/{mistralai_gcp => gcp}/docs/models/toolmessagecontent.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/tooltypes.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/usageinfo.md (100%) rename packages/{mistralai_azure => gcp}/docs/models/usermessage.md (89%) rename packages/{mistralai_gcp => gcp}/docs/models/usermessagecontent.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/utils/retryconfig.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/validationerror.md (100%) rename packages/{mistralai_gcp => gcp}/docs/sdks/chat/README.md (96%) rename packages/{mistralai_gcp => gcp}/docs/sdks/fim/README.md (94%) rename packages/{mistralai_gcp => gcp}/docs/sdks/mistralgcp/README.md (100%) rename packages/{mistralai_gcp => gcp}/py.typed (100%) rename packages/{mistralai_gcp => gcp}/pylintrc (100%) rename packages/{mistralai_gcp => gcp}/pyproject.toml (78%) rename packages/{mistralai_azure => gcp}/scripts/prepare_readme.py (96%) rename packages/{mistralai_gcp => gcp}/scripts/publish.sh (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/__init__.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/_hooks/__init__.py (100%) create mode 100644 packages/gcp/src/mistralai/gcp/client/_hooks/registration.py rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/_hooks/sdkhooks.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/_hooks/types.py (96%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/_version.py (76%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/basesdk.py (98%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/chat.py (96%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/fim.py (98%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/httpclient.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/__init__.py (85%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/assistantmessage.py (81%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/chatcompletionchoice.py (91%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/chatcompletionrequest.py (97%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/chatcompletionresponse.py (93%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/chatcompletionstreamrequest.py (94%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/completionchunk.py (94%) rename packages/{mistralai_gcp/src/mistralai_gcp => 
gcp/src/mistralai/gcp/client}/models/completionevent.py (88%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/completionresponsestreamchoice.py (82%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/contentchunk.py (93%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/deltamessage.py (81%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/fimcompletionrequest.py (99%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/fimcompletionresponse.py (93%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/fimcompletionstreamrequest.py (99%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/function.py (91%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/functioncall.py (91%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/functionname.py (89%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/httpvalidationerror.py (82%) create mode 100644 packages/gcp/src/mistralai/gcp/client/models/imagedetail.py rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/imageurl.py (88%) create mode 100644 packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/jsonschema.py (97%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/mistralgcperror.py (96%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/mistralpromptmode.py (89%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/no_response_error.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/prediction.py (89%) create mode 100644 packages/gcp/src/mistralai/gcp/client/models/referencechunk.py rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/responseformat.py (98%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/responseformats.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/responsevalidationerror.py (86%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/sdkerror.py (93%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/security.py (81%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/systemmessage.py (57%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/systemmessagecontentchunks.py (66%) create mode 100644 packages/gcp/src/mistralai/gcp/client/models/textchunk.py rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/thinkchunk.py (65%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/tool.py (90%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/toolcall.py (92%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/toolchoice.py (94%) rename packages/{mistralai_gcp/src/mistralai_gcp => 
gcp/src/mistralai/gcp/client}/models/toolchoiceenum.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/toolmessage.py (77%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/tooltypes.py (78%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/usageinfo.py (98%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/usermessage.py (73%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/validationerror.py (90%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/py.typed (100%) create mode 100644 packages/gcp/src/mistralai/gcp/client/sdk.py rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/sdkconfiguration.py (94%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/types/__init__.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/types/basemodel.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/__init__.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/annotations.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/datetimes.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/enums.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/eventstreaming.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/forms.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/headers.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/logger.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/metadata.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/queryparams.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/requestbodies.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/retries.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/security.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/serializers.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/unmarshal_json_response.py (95%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/url.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/values.py (100%) rename packages/{mistralai_gcp => gcp}/uv.lock (92%) delete mode 100644 packages/mistralai_azure/.genignore delete mode 100644 packages/mistralai_azure/.vscode/settings.json delete mode 100644 packages/mistralai_azure/USAGE.md delete mode 100644 packages/mistralai_azure/docs/models/assistantmessagerole.md delete mode 100644 packages/mistralai_azure/docs/models/completionresponsestreamchoice.md delete mode 100644 packages/mistralai_azure/docs/models/deltamessage.md delete mode 100644 packages/mistralai_azure/docs/models/documenturlchunk.md delete mode 100644 
packages/mistralai_azure/docs/models/documenturlchunktype.md delete mode 100644 packages/mistralai_azure/docs/models/imageurl.md delete mode 100644 packages/mistralai_azure/docs/models/imageurlchunk.md delete mode 100644 packages/mistralai_azure/docs/models/imageurlchunktype.md delete mode 100644 packages/mistralai_azure/docs/models/referencechunk.md delete mode 100644 packages/mistralai_azure/docs/models/referencechunktype.md delete mode 100644 packages/mistralai_azure/docs/models/role.md delete mode 100644 packages/mistralai_azure/docs/models/textchunk.md delete mode 100644 packages/mistralai_azure/docs/models/thinkchunktype.md delete mode 100644 packages/mistralai_azure/docs/models/toolmessagerole.md delete mode 100644 packages/mistralai_azure/docs/models/type.md delete mode 100644 packages/mistralai_azure/docs/models/usermessagerole.md delete mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/textchunk.py delete mode 100644 packages/mistralai_gcp/.genignore delete mode 100644 packages/mistralai_gcp/.vscode/settings.json delete mode 100644 packages/mistralai_gcp/USAGE.md delete mode 100644 packages/mistralai_gcp/docs/models/assistantmessagerole.md delete mode 100644 packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md delete mode 100644 packages/mistralai_gcp/docs/models/deltamessage.md delete mode 100644 packages/mistralai_gcp/docs/models/imageurl.md delete mode 100644 packages/mistralai_gcp/docs/models/imageurlchunk.md delete mode 100644 packages/mistralai_gcp/docs/models/imageurlchunktype.md delete mode 100644 packages/mistralai_gcp/docs/models/referencechunk.md delete mode 100644 packages/mistralai_gcp/docs/models/referencechunktype.md delete mode 100644 packages/mistralai_gcp/docs/models/role.md delete mode 100644 packages/mistralai_gcp/docs/models/textchunk.md delete mode 100644 packages/mistralai_gcp/docs/models/thinkchunktype.md delete mode 100644 packages/mistralai_gcp/docs/models/toolmessagerole.md delete mode 100644 packages/mistralai_gcp/docs/models/type.md delete mode 100644 packages/mistralai_gcp/docs/models/usermessagerole.md delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/sdk.py create mode 100644 tests/test_azure_integration.py create mode 100644 tests/test_azure_v2_parity.py create mode 100644 tests/test_gcp_integration.py create mode 100644 tests/test_gcp_v2_parity.py diff --git a/.genignore b/.genignore index b80cf0f6..6bd11b26 100644 --- a/.genignore +++ b/.genignore @@ -1,6 +1,6 @@ pyproject.toml examples/* /utils/* -src/mistral/extra/* +src/mistralai/extra/* pylintrc scripts/prepare_readme.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39e850eb..9be71784 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,17 +4,17 @@ repos: hooks: - id: ruff args: [--fix] - files: ^(example/|src/mistralai/).*\.py$ + 
files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ - repo: https://github.com/RobertCraigie/pyright-python rev: v1.1.401 hooks: - id: pyright - files: ^(example/|src/mistralai/).*\.py$ + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.15.0 hooks: - id: mypy - files: ^(example/|src/mistralai/).*\.py$ + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index a9e18489..4aa0af42 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,14 +2,14 @@ speakeasyVersion: 1.685.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:d303e640ad565cc8a9801519b20dc7eab226efdfdab951c11256962d9e479f74 - sourceBlobDigest: sha256:6e4c789de61b2c9c604bf581e0abbadae90e360491d95ec4247678f4f70cee87 + sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 + sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:351c4d392b8b2220c337a207e98ed5665ed27fd85de854871a70c4bc2b9c0784 - sourceBlobDigest: sha256:d79b21f70efb93b0cd261d2044939a288beaf8707a7caae86aca5c4d5de3821b + sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 + sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 tags: - latest mistral-openapi: @@ -22,24 +22,24 @@ targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:d303e640ad565cc8a9801519b20dc7eab226efdfdab951c11256962d9e479f74 - sourceBlobDigest: sha256:6e4c789de61b2c9c604bf581e0abbadae90e360491d95ec4247678f4f70cee87 + sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 + sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:0109302b87fa17b0103ef1e372fae76356811b3c552103e659bd5373d537d759 + codeSamplesRevisionDigest: sha256:a34c3049c604d0bb67101d042e959f14098964fe784f98975a9201c84dbf44d0 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:351c4d392b8b2220c337a207e98ed5665ed27fd85de854871a70c4bc2b9c0784 - sourceBlobDigest: sha256:d79b21f70efb93b0cd261d2044939a288beaf8707a7caae86aca5c4d5de3821b + sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 + sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:09bb7cbf291076170d228116db05d1c9606af541b301b6564609c4d76633258a + codeSamplesRevisionDigest: sha256:fa36e5999e79c32e8b2c1317cc0d6ed179912ced15194f02b5f80da22e45ae5f mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi sourceRevisionDigest: 
sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 sourceBlobDigest: sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:f37fb6188ad25957bef4cadaa03f454a4f9ab0c045db633a46d9cc89af145ba2 + codeSamplesRevisionDigest: sha256:99fcae1bc81801e3825648a44f5ffa62a8f124e3186e5570be40414de164e7f2 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 @@ -57,7 +57,7 @@ workflow: mistralai-azure-sdk: target: python source: mistral-azure-source - output: ./packages/mistralai_azure + output: ./packages/azure publish: pypi: token: $pypi_token @@ -68,7 +68,7 @@ workflow: mistralai-gcp-sdk: target: python source: mistral-google-cloud-source - output: ./packages/mistralai_gcp + output: ./packages/gcp publish: pypi: token: $pypi_token diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index b45d6b3b..ba109c09 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -14,7 +14,7 @@ targets: mistralai-azure-sdk: target: python source: mistral-azure-source - output: ./packages/mistralai_azure + output: ./packages/azure publish: pypi: token: $pypi_token @@ -25,7 +25,7 @@ targets: mistralai-gcp-sdk: target: python source: mistral-google-cloud-source - output: ./packages/mistralai_gcp + output: ./packages/gcp publish: pypi: token: $pypi_token diff --git a/MIGRATION.md b/MIGRATION.md index 9f39cdb5..906173fe 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -33,6 +33,26 @@ from mistralai.client.types import BaseModel | `from mistralai.types import ...` | `from mistralai.client.types import ...` | | `from mistralai.utils import ...` | `from mistralai.client.utils import ...` | +### Azure & GCP Import Changes + +Azure and GCP SDKs now live under the `mistralai` namespace as separate distributions: + +| v1 | v2 | +|---|---| +| `from mistralai_azure import MistralAzure` | `from mistralai.azure.client import MistralAzure` | +| `from mistralai_azure.models import ...` | `from mistralai.azure.client.models import ...` | +| `from mistralai_gcp import MistralGoogleCloud` | `from mistralai.gcp.client import MistralGCP` | +| `from mistralai_gcp.models import ...` | `from mistralai.gcp.client.models import ...` | + +#### Installation Changes + +| v1 | v2 | +|---|---| +| `pip install mistralai` | `pip install mistralai` (includes Azure and GCP) | +| `pip install mistralai[gcp]` (for GCP auth) | `pip install "mistralai[gcp]"` (for GCP auth dependencies) | + +Azure and GCP are now standalone distributions that can be installed independently of the core SDK. The `mistralai[azure]` and `mistralai[gcp]` extras are syntactic sugar that pull in the respective distributions. + ### What Stays the Same - The `Mistral` client API is unchanged diff --git a/README.md b/README.md index 5c8a1b51..a774a9e1 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,7 @@ It's also possible to write a standalone Python script without needing to set up ```python #!/usr/bin/env -S uv run --script # /// script -# requires-python = ">=3.9" +# requires-python = ">=3.10" # dependencies = [ # "mistralai", # ] @@ -374,38 +374,41 @@ You can run the examples in the `examples/` directory using `uv run`. **Prerequisites** -Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +Before you begin, ensure you have `AZURE_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. 
See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). +**Step 1: Install** + +```bash +pip install mistralai +``` + +**Step 2: Example Usage** + Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/azure). ```python -import asyncio import os +from mistralai.azure.client import MistralAzure -from mistralai_azure import MistralAzure - +# The SDK automatically injects api-version as a query parameter client = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version="2024-05-01-preview", # Optional, this is the default ) -async def main() -> None: - res = await client.chat.complete_async( - max_tokens= 100, - temperature= 0.5, - messages= [ - { - "content": "Hello there!", - "role": "user" - } - ] - ) - print(res) - -asyncio.run(main()) +res = client.chat.complete( + model=os.environ["AZURE_MODEL"], + messages=[ + { + "role": "user", + "content": "Hello there!", + } + ], +) +print(res.choices[0].message.content) ``` -The documentation for the Azure SDK is available [here](packages/mistralai_azure/README.md). ### Google Cloud @@ -422,40 +425,43 @@ gcloud auth application-default login **Step 1: Install** -Install the extras dependencies specific to Google Cloud: - ```bash +pip install mistralai +# For GCP authentication support (required): pip install "mistralai[gcp]" ``` **Step 2: Example Usage** -Here's a basic example to get you started. - -```python -import asyncio -from mistralai_gcp import MistralGoogleCloud +Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/gcp). -client = MistralGoogleCloud() +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` +```python +import os +from mistralai.gcp.client import MistralGCP -async def main() -> None: - res = await client.chat.complete_async( - model= "mistral-small-2402", - messages= [ - { - "content": "Hello there!", - "role": "user" - } - ] - ) - print(res) +# The SDK auto-detects credentials and builds the Vertex AI URL +client = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region="us-central1", # Default: europe-west4 +) -asyncio.run(main()) +res = client.chat.complete( + model="mistral-small-2503", + messages=[ + { + "role": "user", + "content": "Hello there!", + } + ], +) +print(res.choices[0].message.content) ``` -The documentation for the GCP SDK is available [here](packages/mistralai_gcp/README.md). 
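Taken together, the README and MIGRATION.md changes above suggest the following migration path for existing v1 provider code. This is a minimal sketch, assuming the constructor keywords shown in the new README examples (`api_key`/`server_url`/`api_version` for Azure, `project_id`/`region` for GCP); the endpoint, key, and model values come from environment variables and are placeholders, not defaults:

```python
import asyncio
import os

# v1 (removed):  from mistralai_azure import MistralAzure
#                from mistralai_gcp import MistralGoogleCloud
# v2: both providers live under the mistralai namespace.
from mistralai.azure.client import MistralAzure
from mistralai.azure.client.models import UserMessage as AzureUserMessage
from mistralai.gcp.client import MistralGCP
from mistralai.gcp.client.models import UserMessage as GCPUserMessage

# Azure: the api-version query parameter is injected automatically.
azure_client = MistralAzure(
    api_key=os.environ["AZURE_API_KEY"],
    server_url=os.environ["AZURE_ENDPOINT"],
)
azure_res = azure_client.chat.complete(
    model=os.environ["AZURE_MODEL"],
    messages=[AzureUserMessage(content="Hello there!")],
)
print(azure_res.choices[0].message.content)

# GCP: credentials are detected via google.auth.default(), the Vertex AI URL
# is built from project_id and region, and tokens refresh only when expired.
gcp_client = MistralGCP(
    project_id=os.environ.get("GCP_PROJECT_ID"),  # optional, auto-detected
    region=os.environ.get("GCP_REGION", "us-central1"),
)


async def main() -> None:
    # The async variants from v1 remain available under the new namespace.
    res = await gcp_client.chat.complete_async(
        model=os.environ.get("GCP_MODEL", "mistral-small-2503"),
        messages=[GCPUserMessage(content="What is the best French cheese?")],
    )
    print(res.choices[0].message.content)


asyncio.run(main())
```

As MIGRATION.md notes, the Azure and GCP packages are now separate distributions under the shared `mistralai` namespace, so the import paths and constructor keywords change while the chat API surface itself stays the same.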
- ## Available Resources and Operations @@ -674,8 +680,8 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.models.list(, - RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) + res = mistral.models.list( + retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) # Handle response print(res) diff --git a/examples/azure/.env.example b/examples/azure/.env.example new file mode 100644 index 00000000..7467bf2e --- /dev/null +++ b/examples/azure/.env.example @@ -0,0 +1,4 @@ +AZURE_API_KEY=your-azure-api-key +AZURE_ENDPOINT=https://your-endpoint.services.ai.azure.com/models +AZURE_MODEL=your-deployment-name +AZURE_API_VERSION=2024-05-01-preview diff --git a/examples/azure/az_chat_no_streaming.py b/examples/azure/az_chat_no_streaming.py deleted file mode 100644 index 4d5530dc..00000000 --- a/examples/azure/az_chat_no_streaming.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -from mistralai_azure import MistralAzure -from mistralai_azure.models import ChatCompletionRequestMessages, UserMessage - -client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], -) - -messages: list[ChatCompletionRequestMessages] = [ - UserMessage(content="What is the capital of France?"), -] -res = client.chat.complete(messages=messages) -print(res.choices[0].message.content) diff --git a/examples/azure/chat_no_streaming.py b/examples/azure/chat_no_streaming.py index 4d5530dc..952b171d 100644 --- a/examples/azure/chat_no_streaming.py +++ b/examples/azure/chat_no_streaming.py @@ -1,15 +1,22 @@ import os -from mistralai_azure import MistralAzure -from mistralai_azure.models import ChatCompletionRequestMessages, UserMessage +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.models import ChatCompletionRequestMessage, UserMessage +AZURE_API_KEY = os.environ.get("AZURE_API_KEY", "") +AZURE_ENDPOINT = os.environ.get("AZURE_ENDPOINT", "") +AZURE_MODEL = os.environ.get("AZURE_MODEL", "mistral-small-2503") +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) -messages: list[ChatCompletionRequestMessages] = [ +messages: list[ChatCompletionRequestMessage] = [ UserMessage(content="What is the capital of France?"), ] -res = client.chat.complete(messages=messages) +res = client.chat.complete(model=AZURE_MODEL, messages=messages) print(res.choices[0].message.content) diff --git a/examples/gcp/.env.example b/examples/gcp/.env.example new file mode 100644 index 00000000..6721bd37 --- /dev/null +++ b/examples/gcp/.env.example @@ -0,0 +1,3 @@ +GCP_PROJECT_ID=your-gcp-project-id +GCP_REGION=us-central1 +GCP_MODEL=mistral-small-2503 diff --git a/examples/gcp/async_chat_no_streaming.py b/examples/gcp/async_chat_no_streaming.py index 178f151c..61a2d076 100755 --- a/examples/gcp/async_chat_no_streaming.py +++ b/examples/gcp/async_chat_no_streaming.py @@ -1,19 +1,43 @@ #!/usr/bin/env python +""" +Example: Async chat completion with GCP Vertex AI. 
+ +The SDK automatically: +- Detects credentials via google.auth.default() +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from project_id and region + +Prerequisites: + gcloud auth application-default login + +Usage: + GCP_PROJECT_ID=your-project GCP_REGION=us-central1 GCP_MODEL=mistral-small-2503 python async_chat_no_streaming.py +""" import asyncio import os -from mistralai_gcp import MistralGoogleCloud -from mistralai_gcp.models.usermessage import UserMessage +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.models import UserMessage +# Configuration from environment variables +GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID") # Optional: auto-detected from credentials +GCP_REGION = os.environ.get("GCP_REGION", "us-central1") +GCP_MODEL = os.environ.get("GCP_MODEL", "mistral-small-2503") -async def main(): - model = "mistral-large-2407" - client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) +async def main(): + # The SDK automatically handles: + # - Credential detection via google.auth.default() + # - Token refresh when expired + # - Vertex AI URL construction + client = MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) chat_response = await client.chat.complete_async( - model=model, + model=GCP_MODEL, messages=[UserMessage(content="What is the best French cheese?")], ) diff --git a/examples/gcp/gcp_async_chat_no_streaming.py b/examples/gcp/gcp_async_chat_no_streaming.py deleted file mode 100755 index 178f151c..00000000 --- a/examples/gcp/gcp_async_chat_no_streaming.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python - -import asyncio -import os - -from mistralai_gcp import MistralGoogleCloud -from mistralai_gcp.models.usermessage import UserMessage - - -async def main(): - model = "mistral-large-2407" - - client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) - - chat_response = await client.chat.complete_async( - model=model, - messages=[UserMessage(content="What is the best French cheese?")], - ) - - print(chat_response.choices[0].message.content) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/packages/azure/.genignore b/packages/azure/.genignore new file mode 100644 index 00000000..6bdf6621 --- /dev/null +++ b/packages/azure/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +src/mistralai/azure/client/sdk.py +src/mistralai/azure/client/_hooks/registration.py +README.md +USAGE.md +docs/sdks/**/README.md diff --git a/packages/mistralai_azure/.gitattributes b/packages/azure/.gitattributes similarity index 100% rename from packages/mistralai_azure/.gitattributes rename to packages/azure/.gitattributes diff --git a/packages/mistralai_azure/.gitignore b/packages/azure/.gitignore similarity index 100% rename from packages/mistralai_azure/.gitignore rename to packages/azure/.gitignore diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock similarity index 69% rename from packages/mistralai_azure/.speakeasy/gen.lock rename to packages/azure/.speakeasy/gen.lock index a7cdba10..5cf1d8e1 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/azure/.speakeasy/gen.lock @@ -1,24 +1,25 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 2bebd9aadeecb18391d46d1dadc340ef + docChecksum: 571037b8485712afcef86703debb7f15 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 1.8.1 - configChecksum: 0448ba634aa36625c6ac276e17e3b3b5 + releaseVersion: 2.0.0a4 + 
configChecksum: 549cf1eae199d39bf97052462fd8e640 repoURL: https://github.com/mistralai/client-python.git - repoSubDirectory: packages/mistralai_azure - installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_azure + repoSubDirectory: packages/azure + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure published: true persistentEdits: - generation_id: ecb4f74f-ba8a-4f28-941d-36b3258200bd - pristine_commit_hash: 785c0560d42a9c4cff938392bb6d52d98a2f3529 - pristine_tree_hash: 50ed42d2e4b3d4ecd639935cd1511220354a41d7 + generation_id: b0dbfbbb-4028-4834-9980-a1d2dba52a8d + pristine_commit_hash: 6cab3cf0757d3c7dd58ee1eabec66dd63a8c9a03 + pristine_tree_hash: abf5c6e4b603142b1a6aac936d7c3be574611256 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 constsAndDefaults: 1.0.5 core: 5.23.18 defaultEnabledRetries: 0.2.0 @@ -57,16 +58,12 @@ trackedFiles: pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 docs/models/assistantmessage.md: id: 7e0218023943 - last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 - pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d docs/models/assistantmessagecontent.md: id: 9f1795bbe642 last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d - docs/models/assistantmessagerole.md: - id: bb5d2a4bc72f - last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 - pristine_git_object: 658229e77eb6419391cf7941568164541c528387 docs/models/chatcompletionchoice.md: id: 0d15c59ab501 last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 @@ -77,12 +74,12 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:de5476eb16a5ff75942b1ece68dbe547110dbbb8 - pristine_git_object: 104a1f96e60e1d4b86305dab2829be084b00b153 - docs/models/chatcompletionrequestmessages.md: - id: ec996b350e12 - last_write_checksum: sha1:2ecec8d12cdb48426f4eb62732066fc79fcd4ec3 - pristine_git_object: bc7708a67f06d74e8a5bf1facb2b23fb1e08053c + last_write_checksum: sha1:a404d37c6605a5524f1f48b418bacf46e86a9a68 + pristine_git_object: 3b0f7270840e257475f4b0f15f27e0c0152818d2 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 docs/models/chatcompletionrequeststop.md: id: fcaf5bbea451 last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 @@ -97,8 +94,16 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: sha1:f6bc4a0f064fc3420ae9b29c7e6fc9100ae85e4d - pristine_git_object: 85f237b4fc59ffc487377f150952284cc2102d85 + last_write_checksum: sha1:daca00885f0d0f9863d8420bbee514723084813d + pristine_git_object: f78156a647ec63ca60ff423acbdee2b2404e4e60 + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 
2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 docs/models/chatcompletionstreamrequesttoolchoice.md: id: 210d5e5b1413 last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 @@ -113,40 +118,36 @@ trackedFiles: pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d docs/models/completionresponsestreamchoice.md: id: d56824d615a6 - last_write_checksum: sha1:dcf4b125b533192cb5aea1a68551866954712dc5 - pristine_git_object: c807dacd98eb3561ee45f40db71a92cb72b0f6de - docs/models/content.md: - id: bfd859c99f86 - last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 - pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 docs/models/contentchunk.md: id: d2d3a32080cd last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f docs/models/deltamessage.md: id: 6c5ed6b60968 - last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 - pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/document.md: id: cd1d2a444370 last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 docs/models/documenturlchunk.md: id: 48437d297408 - last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 - pristine_git_object: 6c9a5b4d9e6769be242b27ef0208f6af704689c0 - docs/models/documenturlchunktype.md: - id: a3574c91f539 - last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c - pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef + last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d + pristine_git_object: 9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 docs/models/filechunk.md: id: edc076728e9d last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c - docs/models/finishreason.md: - id: 73315c2a39b3 - last_write_checksum: sha1:5b58c7fa9219f728b9731287e21abe1be9f11e4a - pristine_git_object: 45a5aedb7241cf080df3eb976a4413064d314009 docs/models/format_.md: id: a17c22228eda last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 @@ -167,22 +168,22 @@ trackedFiles: id: a211c095f2ac last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 docs/models/imageurl.md: id: e75dd23cec1d - last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 - pristine_git_object: 
7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 docs/models/imageurlchunk.md: id: 4407097bfff3 - last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 - pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 - docs/models/imageurlchunkimageurl.md: - id: c7fae88454ce - last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 - pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 - docs/models/imageurlchunktype.md: - id: b9af2db9ff60 - last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 - pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + last_write_checksum: sha1:da7a792f7b649f311062338dfbf3d25ff55fe6c5 + pristine_git_object: db0c53d22e29fa25222edb86b264e5135879a029 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 docs/models/jsonschema.md: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f @@ -191,10 +192,6 @@ trackedFiles: id: b071d5a509cc last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 - docs/models/messages.md: - id: 2103cd675c2f - last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 - pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/mistralpromptmode.md: id: d17d5db4d3b6 last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 @@ -233,12 +230,8 @@ trackedFiles: pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 docs/models/referencechunk.md: id: 07895f9debfd - last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 - pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc - docs/models/referencechunktype.md: - id: 0944b80ea9c8 - last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 - pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab docs/models/responseformat.md: id: 50a1e4140614 last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add @@ -247,22 +240,14 @@ trackedFiles: id: cf1f250b82db last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f - docs/models/role.md: - id: b694540a5b1e - last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d - pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 docs/models/security.md: id: 452e4d4eb67a last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 - docs/models/stop.md: - id: f231cc9f5041 - last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 - pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 docs/models/systemmessage.md: id: fdb7963e1cdf - last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f - pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 docs/models/systemmessagecontent.md: id: 94a56febaeda last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb @@ -277,16 +262,12 @@ trackedFiles: pristine_git_object: 
54f029b814fdcfa2e93e2b8b0594ef9e4eab792a docs/models/textchunk.md: id: 6cd12e0ef110 - last_write_checksum: sha1:6d41d1991d122805734ed0d90ee01592aa5ae6ff - pristine_git_object: 6daab3c381bd8c13d2935bf62578648a8470fc76 + last_write_checksum: sha1:aa448d4937c0c1cd562621f0a9080aa0dc6e4bd1 + pristine_git_object: b266619dcb57222ec343f373c43b2b5cef5b8b93 docs/models/thinkchunk.md: id: bca24d7153f6 - last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b - pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab - docs/models/thinkchunktype.md: - id: 0fbeed985341 - last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 - pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + last_write_checksum: sha1:2b8ff7737fa7255673ca31da7cb2e6803fce9e02 + pristine_git_object: b07f598ebc5f0e9c041186c081dc98bc21104bdb docs/models/thinking.md: id: 07234f8dd364 last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 @@ -309,40 +290,28 @@ trackedFiles: pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 docs/models/toolmessage.md: id: 0553747c37a1 - last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 - pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 docs/models/toolmessagecontent.md: id: f0522d2d3c93 last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 - docs/models/toolmessagerole.md: - id: f333d4d1ab56 - last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f - pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 docs/models/tooltypes.md: id: adb50fe63ea2 last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 - docs/models/type.md: - id: 98c32f09b2c8 - last_write_checksum: sha1:8aa9ca999e9648ddc2240bf80780684e3e858ddf - pristine_git_object: eb0581e7174b6951d69c485a64af5244cb8687fa docs/models/usageinfo.md: id: ec6fe65028a9 last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 docs/models/usermessage.md: id: ed66d7a0f80b - last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed - pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 docs/models/usermessagecontent.md: id: 52c072c851e8 last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf - docs/models/usermessagerole.md: - id: 99ffa937c462 - last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 - pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 docs/models/utils/retryconfig.md: id: 4343ac43161c last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d @@ -361,344 +330,348 @@ trackedFiles: pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 scripts/prepare_readme.py: id: e0c5957a6035 - last_write_checksum: sha1:2291075229aebf4e036800b5b9299b37fcb8707c - pristine_git_object: ff1121fda23730f356d2df2ad17c8e991b9fc605 + last_write_checksum: sha1:26b29aad3c23a98912fd881698c976aac55749fe + pristine_git_object: 2b2577ea83873f64aa9f91d9d762bc6e1f250977 scripts/publish.sh: id: fe273b08f514 last_write_checksum: 
sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 - src/mistralai_azure/__init__.py: - id: 3cd9e92c2f72 + src/mistralai/azure/client/__init__.py: + id: 5624bda9196d last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c - src/mistralai_azure/_hooks/__init__.py: - id: 66932eacf398 + src/mistralai/azure/client/_hooks/__init__.py: + id: 850c237217cb last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 - src/mistralai_azure/_hooks/sdkhooks.py: - id: 1184c9201c62 - last_write_checksum: sha1:c98774db1664db2bc6d80e8a5f4f5133260f201a - pristine_git_object: 37ff4e9f0ebd42a58ada6300098a5b1b85a54b69 - src/mistralai_azure/_hooks/types.py: - id: a32fe1943bce - last_write_checksum: sha1:78fc31840a38e668a73871885c779929196a8bec - pristine_git_object: 0c22d7ebccdd64097033454b7c698d10ee59987d - src/mistralai_azure/_version.py: - id: 7711a0bb1da3 - last_write_checksum: sha1:9a446d67d6a86cdf9d9e3447c1c09a4f719b2c9b - pristine_git_object: 79277f9a358b4c851363e11e1e8f534779e9f271 - src/mistralai_azure/basesdk.py: - id: 7d825dbc7d6e - last_write_checksum: sha1:4070786599952b3c603d1384d87d7b92bb13b974 - pristine_git_object: 89f7dc493d7f50d5f2d3f468c0a8392a6ec5e28b - src/mistralai_azure/chat.py: - id: ebf1c99bea88 - last_write_checksum: sha1:2d78fa9e8b3e300e18b6fb3bc116e824261efb55 - pristine_git_object: 10bb247fb89f0f9ef110300224c95f2a7653ad2f - src/mistralai_azure/httpclient.py: - id: 808a3f534ffa + src/mistralai/azure/client/_hooks/sdkhooks.py: + id: e9923767446c + last_write_checksum: sha1:ae162d6e73be0eb767c353c815d76b034395d50f + pristine_git_object: 2080681b7f2c52fcb80dcb95eff48654763e6258 + src/mistralai/azure/client/_hooks/types.py: + id: 07c892e06527 + last_write_checksum: sha1:fde2e0f6da6930232b67682009de520724b23398 + pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 + src/mistralai/azure/client/_version.py: + id: a77160e60e5d + last_write_checksum: sha1:e26eb828e9a240042acc754f38dcf2e581e045aa + pristine_git_object: 4448d2a0fd803f43820378359c921d09eba6f43e + src/mistralai/azure/client/basesdk.py: + id: 5a585a95ec21 + last_write_checksum: sha1:d7a4a959d7d3ca3cd22d8daf144c3b4d5c0d1210 + pristine_git_object: b0391ac078b4e2a5d9107ed014c1ca939a553c23 + src/mistralai/azure/client/chat.py: + id: c18454e628d7 + last_write_checksum: sha1:cc1ff54b85ce494428ebf22ec01bd1199cd9e2b6 + pristine_git_object: 3348bf47eafb3fcfb2de0e7d512073e947b69554 + src/mistralai/azure/client/httpclient.py: + id: 60c81037fbd0 last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d - src/mistralai_azure/models/__init__.py: - id: e5fcf3933d2c - last_write_checksum: sha1:43f0ebb24f64a382fd18202da65a321d9925dbee - pristine_git_object: 9baa3ff1865cd8aec9e9b93d6e3c315e8c7870c5 - src/mistralai_azure/models/assistantmessage.py: - id: 15f117b45380 - last_write_checksum: sha1:3c2872d06ad465dbbbedcf8d397d1f12961e1e2e - pristine_git_object: 7790eb10a034d892c3c1e793c412c75ff8820e40 - src/mistralai_azure/models/chatcompletionchoice.py: - id: 93cfc6cec0d2 - last_write_checksum: sha1:f5dfcf407d8abd5ce8eb23f058c589861c71a0df - pristine_git_object: 7c6eb933faf09cc5c6102575d371ac280f2e242d - src/mistralai_azure/models/chatcompletionrequest.py: - id: d046a16b5e58 - last_write_checksum: sha1:bc1e0b5c8b11bfef5a9b135436c2f3f555a11fa3 - pristine_git_object: 
a7b095f34c572e1805650e44f847946280ccb3fe - src/mistralai_azure/models/chatcompletionresponse.py: - id: fc342e80f579 - last_write_checksum: sha1:a93593ec734420bc122f0b0b8c49d630795f1d42 - pristine_git_object: 7a66f3221a154b1a0f47c4f808ece8e580280548 - src/mistralai_azure/models/chatcompletionstreamrequest.py: - id: 1052b055a791 - last_write_checksum: sha1:18f71c5eeda25d23f2c82ddcdb710a20b44b806c - pristine_git_object: 96cd631b0ed74e5c82c6e2492011001021b019f8 - src/mistralai_azure/models/completionchunk.py: - id: e04bc380589d - last_write_checksum: sha1:490c3236276ae8fdecb883744e263aecbe4c608c - pristine_git_object: d6cc2a86a1fda1ebce1f3c5a169ab1118705e3f0 - src/mistralai_azure/models/completionevent.py: - id: e75909f919b1 - last_write_checksum: sha1:9f5423ad56747fb7cc95a6f01e0826510571d4c1 - pristine_git_object: 5a2039c2492bab82184b4f2469806f8b977a7246 - src/mistralai_azure/models/completionresponsestreamchoice.py: - id: 24fe265a60d8 - last_write_checksum: sha1:df52342c3458cca6396d538c5d9a42f07131796d - pristine_git_object: 0e890aacf79f8f220f585d914c6fbe8863232036 - src/mistralai_azure/models/contentchunk.py: - id: 9e6b90acdf54 - last_write_checksum: sha1:e93c57ef87654a06d8849030f65db3d279f8f7ad - pristine_git_object: e6a3e24a8857ea1661874197eec967f0ac99e31d - src/mistralai_azure/models/deltamessage.py: - id: 593eaaeda97b - last_write_checksum: sha1:9c2f6e52c81d2f5bf71f520861158dc5eae6eab7 - pristine_git_object: 7fa3c3f216153ebc0a2d31e590793698e95a8be8 - src/mistralai_azure/models/documenturlchunk.py: - id: bff69bfa8014 - last_write_checksum: sha1:5c515c4c85b78d8f4cf147faab9cf01c3501e0b9 - pristine_git_object: ea8d5625a6d1579dd60f2e4a067f455c82334986 - src/mistralai_azure/models/filechunk.py: - id: 0de687fe41c1 - last_write_checksum: sha1:56a1765b46702d24ee9c00ab3a06ccdbffdd63f9 - pristine_git_object: 2c3edc078b5e781b4d7163ab01e02a3347c81e2f - src/mistralai_azure/models/function.py: - id: 16111a6101f2 - last_write_checksum: sha1:456d34df457592f1975b0d1e158207d4446a6c41 - pristine_git_object: a4642f92a0cf614b458591c220a83ae1c422ce25 - src/mistralai_azure/models/functioncall.py: - id: e383b31a7f16 - last_write_checksum: sha1:cec288f925fa58842bb7d9e688f6122a01973d4b - pristine_git_object: dd93c4629c3bd81dd6fb305474ce0cd5443e1bdb - src/mistralai_azure/models/functionname.py: - id: ebc3e07e4b6f - last_write_checksum: sha1:743cec4c3f586d67d1ab2816d8d76170f46a3ca1 - pristine_git_object: b55c82af3f29efe38698bc776a8532c647dccc36 - src/mistralai_azure/models/httpvalidationerror.py: - id: da4825943f94 - last_write_checksum: sha1:dce58ead8f7f901514250e1ae5965ba039b1da14 - pristine_git_object: 56607d9437ce39097deac134d4f622ea523cbda7 - src/mistralai_azure/models/imageurl.py: - id: 80cc0df94e9d - last_write_checksum: sha1:a1a416ae5bf9c559219cff5f008a90f251a52477 - pristine_git_object: a5a66360b017cbdc342775241aa4aa2322534c6a - src/mistralai_azure/models/imageurlchunk.py: - id: c5c6dd2f1782 - last_write_checksum: sha1:11634325be12aa567b42227f2117e9b8c854a51c - pristine_git_object: a40e451c60caca688a9379dcb20d545e9e6b76e2 - src/mistralai_azure/models/jsonschema.py: - id: 8c635811dd6b - last_write_checksum: sha1:a99a6de224e51eb6cf85fa6de8cf37266ab5fe6d - pristine_git_object: 0f7563fc17bf172d527d09507294b4ef5646c22c - src/mistralai_azure/models/mistralazureerror.py: - id: a919897c4ea9 + src/mistralai/azure/client/models/__init__.py: + id: "335011330e21" + last_write_checksum: sha1:9afe0f0fb324a2b3c60ec98ce78b1ff6f908db39 + pristine_git_object: 51db6a383ddbab2d946b00c41934359a7eb50448 + 
src/mistralai/azure/client/models/assistantmessage.py: + id: 353ed9110f97 + last_write_checksum: sha1:e444c76e27b9b745b9238894bdf2b6a40bba6e6e + pristine_git_object: f5793f9455485c576293b44fb548be8bae9c7a65 + src/mistralai/azure/client/models/chatcompletionchoice.py: + id: 6942c7db5891 + last_write_checksum: sha1:817bfda6120a98248322c308629e404081e01279 + pristine_git_object: 67b5ba694217f4f3b95589d7f84af6a9bea9802d + src/mistralai/azure/client/models/chatcompletionrequest.py: + id: 0c711c870184 + last_write_checksum: sha1:fae2a92375aa3e58c258e4497acead859cd3b6dc + pristine_git_object: 921790959880ddf9b9ffce15d881e01f8adefa86 + src/mistralai/azure/client/models/chatcompletionresponse.py: + id: bdfacf065e9e + last_write_checksum: sha1:c72fb624e7475a551d37e0b291b64bcf772c402a + pristine_git_object: d41f9c6fab670cf7c961f50b1302f9a88cf48162 + src/mistralai/azure/client/models/chatcompletionstreamrequest.py: + id: da00a7feb4ef + last_write_checksum: sha1:c8c84c818b3b22bfec1e7f5737bbb281088dd3ba + pristine_git_object: be21eed2ecbe8354eb9a4bfa48122b28dada4aaf + src/mistralai/azure/client/models/completionchunk.py: + id: 28d620f25510 + last_write_checksum: sha1:413545e0521539346bff6e77fdec0c9e383bde17 + pristine_git_object: b94284b2d9c29c25a2f8eaa02828e2a205f4407e + src/mistralai/azure/client/models/completionevent.py: + id: a6f00a747933 + last_write_checksum: sha1:3d04bfbdaf11c52af5613ed0fd70c8dbc59f6d49 + pristine_git_object: c4b272871d9b3ea8443f469d29b0825706c25c00 + src/mistralai/azure/client/models/completionresponsestreamchoice.py: + id: 3ba5d7ba8a13 + last_write_checksum: sha1:f917300daf4febec7661f2c73bae675600ee0bdd + pristine_git_object: 2a4d053feb84cf2a9675d76ae08c83945b26644c + src/mistralai/azure/client/models/contentchunk.py: + id: 1f65e4f8f731 + last_write_checksum: sha1:79efbc90c1ae36b74492666125fb3e5ecaa5c27a + pristine_git_object: 0f09f76703efd95fcd96377b8ec6870d58dbf829 + src/mistralai/azure/client/models/deltamessage.py: + id: b7dab1d158de + last_write_checksum: sha1:553fdff5a3aec6909417be3cb390d99421af1693 + pristine_git_object: 2c01feae56c44d256f1e579c15f08e167dcc6481 + src/mistralai/azure/client/models/documenturlchunk.py: + id: e56fec6e977f + last_write_checksum: sha1:a43cee08f935933bf715b2f1a82b4c746b591f35 + pristine_git_object: 345bafc2bfe3cc056d746cf8151cf53b68771414 + src/mistralai/azure/client/models/filechunk.py: + id: 150d9f180110 + last_write_checksum: sha1:df1e010006338f6dd37009f2547ab8f0b90b917a + pristine_git_object: 829f03d84c25dd859d514ffa26e570f235e4e75b + src/mistralai/azure/client/models/function.py: + id: 6d1e2011a14b + last_write_checksum: sha1:62df160db82853d79907cccff4d0904f6bb9f142 + pristine_git_object: f4edce0fb8563f485d9a63a42439a9b2593a7f40 + src/mistralai/azure/client/models/functioncall.py: + id: ced560a1bd57 + last_write_checksum: sha1:490cb3a0305994de063e06fa4c77defa911271f3 + pristine_git_object: d476792ccbb5aa2002deb870f1c81cc1500f59d4 + src/mistralai/azure/client/models/functionname.py: + id: 6f09474ebc85 + last_write_checksum: sha1:651ceed24416ce8192f70db03cc5cd0db685899f + pristine_git_object: 839e0d557a902da6c819210962e38e1df9bda90f + src/mistralai/azure/client/models/httpvalidationerror.py: + id: ca155413681b + last_write_checksum: sha1:9dea33d9c74bbdf842ee9d157e4aaa05c36ae34a + pristine_git_object: 40bccddc4d0c0e761d70af713387561101e20b60 + src/mistralai/azure/client/models/imagedetail.py: + id: de211988043d + last_write_checksum: sha1:812f2ec4fc0d8d13db643ed49192384d5a841aa4 + pristine_git_object: 
2d074cee614e1c49b69ee4073c3aaaa7a5a2c9e2 + src/mistralai/azure/client/models/imageurl.py: + id: c8882341c798 + last_write_checksum: sha1:443ee3739b3801928b4f3d4256531078fc4045e8 + pristine_git_object: b3c705e3f261ebd59f40e46785577694d80f98bf + src/mistralai/azure/client/models/imageurlchunk.py: + id: b6f0abb574d7 + last_write_checksum: sha1:4651f12f779bc86874c8516f06e39b882e414c92 + pristine_git_object: ee6de50f2add830c19d0b8b030a7c7a2ab65cb11 + src/mistralai/azure/client/models/jsonschema.py: + id: bfd486f4bb18 + last_write_checksum: sha1:ffe7190393086a4301aaffa6854cb3d80b0db92f + pristine_git_object: 5aaa490af350ac1c436dafb3d3c73d56402cac11 + src/mistralai/azure/client/models/mistralazureerror.py: + id: 31ed29254e67 last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 - src/mistralai_azure/models/mistralpromptmode.py: - id: f62a521bcdae - last_write_checksum: sha1:82190bc14d2e51440723176cb8108791485c1180 - pristine_git_object: 77230b7e5e61cc662fdc52c72e8b817a15e183c3 - src/mistralai_azure/models/no_response_error.py: - id: 54523e14f29b + src/mistralai/azure/client/models/mistralpromptmode.py: + id: d0028b1e4129 + last_write_checksum: sha1:46fe1ab8ac2d5867877368a59a4aa5be2fabadeb + pristine_git_object: 26e7adbdc4a981c92d51b72542c966b0ba0fb8f8 + src/mistralai/azure/client/models/no_response_error.py: + id: a956d6cd06f0 last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 - src/mistralai_azure/models/ocrimageobject.py: - id: 6c349909fb0c - last_write_checksum: sha1:0fed6abf8172f6ee40e703ef86ee9d902c6e5d7e - pristine_git_object: 9d0dd01dbb5be095e234aa3ec9469fface68c3d2 - src/mistralai_azure/models/ocrpagedimensions.py: - id: f33f598001b2 - last_write_checksum: sha1:5281879ef3d737a17a539cefda9f222302ead7da - pristine_git_object: efb62a58f22ad62c730b3af93bff151586105957 - src/mistralai_azure/models/ocrpageobject.py: - id: 99f20768c4d6 - last_write_checksum: sha1:c7479b83b0eb619e6b0f82344e81bc691f0b3a46 - pristine_git_object: e95718001e07bb89ba2fc9094f88b894572148bb - src/mistralai_azure/models/ocrrequest.py: - id: 4e574d5fb9be - last_write_checksum: sha1:1b03dc8b392069f6b142228e74179c8341b09ffa - pristine_git_object: e9c23afcdd7440660f17c7819406d7e603eabbec - src/mistralai_azure/models/ocrresponse.py: - id: 326a4d9fab25 - last_write_checksum: sha1:cf597498a5841a56bbd1aeb8478bd57a01d93cb1 - pristine_git_object: 3e43fa8eb7b80fafbd9344ad5a98c0ead98c54cb - src/mistralai_azure/models/ocrtableobject.py: - id: 3ba1292c343a - last_write_checksum: sha1:2d1d05902a9ed6bccdb41ccac6782f015450cf2e - pristine_git_object: 189f059eaa8a32cc32a5320ea9fe33d779e8ef1c - src/mistralai_azure/models/ocrusageinfo.py: - id: 0de4eae62e4b - last_write_checksum: sha1:85e5a850bd2f847e4a02b0731b0327ca0a02f643 - pristine_git_object: 1f5c9f1bc2cf2d728dec06b0930602852474a29e - src/mistralai_azure/models/prediction.py: - id: 9e8a0a7a3ca7 - last_write_checksum: sha1:e78af600f109a7489a5bcce80b48adf29cc0c4c3 - pristine_git_object: b23a935c00cd7ce4e7b7bd6fe8f2da87f8aaca92 - src/mistralai_azure/models/referencechunk.py: - id: 420a12dfec3b - last_write_checksum: sha1:f49da7a4541f55b283e9391e6397a9e4286570bd - pristine_git_object: 32d2ca68e67be3f03e14f74fd7e7692fa05b70f5 - src/mistralai_azure/models/responseformat.py: - id: aa7acbc1bda7 - last_write_checksum: sha1:70e7960bb4ec5db5f133c4cc8f6e813e39f8c671 - pristine_git_object: c989f3a4467c21416ea59b33fbc734a1477a6eb3 - 
src/mistralai_azure/models/responseformats.py: - id: 780a7aa0e87e + src/mistralai/azure/client/models/ocrimageobject.py: + id: 9c9f987d94bb + last_write_checksum: sha1:b86f5187d1c425ddf27ed4815657a7c41d71855c + pristine_git_object: 38e9d3e48df5cee8cdd0cd1d7b6df62182814104 + src/mistralai/azure/client/models/ocrpagedimensions.py: + id: 7669a25f32b3 + last_write_checksum: sha1:60642db6bb61f0e96204fb78d3aa0bd80dd0a7e5 + pristine_git_object: 12858da92de99aa6da9d6e148df3ba7ee37496c7 + src/mistralai/azure/client/models/ocrpageobject.py: + id: eea193b05126 + last_write_checksum: sha1:baada584537b75e2e184738424068e61afe263c7 + pristine_git_object: 5fb821c19fd3cca2c2e149bd058a7ca49d2d002b + src/mistralai/azure/client/models/ocrrequest.py: + id: 365a5b4776a2 + last_write_checksum: sha1:9d3a9bccd341219934470688d3818557231b9b62 + pristine_git_object: fece2713166fc943194b7b38ec9b82db295bba0a + src/mistralai/azure/client/models/ocrresponse.py: + id: b8cde8c16a4c + last_write_checksum: sha1:e6f08c68f0388919ca7bcbc4f0cb134525053fcd + pristine_git_object: 787289fa995ba6cbf4b2ef3d3c41edb31f656674 + src/mistralai/azure/client/models/ocrtableobject.py: + id: c2cd51b8789e + last_write_checksum: sha1:11052d42f0d91916f038437923ea656bf882032c + pristine_git_object: 3e3c25830a3216f4ef325f5b1056a0c1a267b090 + src/mistralai/azure/client/models/ocrusageinfo.py: + id: 5e9118cac468 + last_write_checksum: sha1:6b27c09b5ec447c6ede22aa75190a1e06353349c + pristine_git_object: e2ceba35eb3f6e148389a7fd466dea5c051480a4 + src/mistralai/azure/client/models/prediction.py: + id: bd6abfa93083 + last_write_checksum: sha1:87eb3c43fa31b245c13c4708602b300956aa9efb + pristine_git_object: 6b8d6480b9ba1cb6683bdc93c24fb762ccfba146 + src/mistralai/azure/client/models/referencechunk.py: + id: c9612f854670 + last_write_checksum: sha1:b96507bcc82939fa4057532ef7e6a440baabd973 + pristine_git_object: e0bcb06be4d4c8d947ee267a9728aeae3a2c52fe + src/mistralai/azure/client/models/responseformat.py: + id: c124e7c316aa + last_write_checksum: sha1:f8c9e581053d1d885196c210a219a3e7aa086610 + pristine_git_object: 39fb03a25efdbc0a92ea91c72038ddd86ee056be + src/mistralai/azure/client/models/responseformats.py: + id: fef416cefcd4 last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai_azure/models/responsevalidationerror.py: - id: 1952c765e2ec - last_write_checksum: sha1:d516c0c88210dd28b65747daa2fa1b63f432fe89 - pristine_git_object: a33954ccead3a8df87bdcc30a090efbb0ebecb94 - src/mistralai_azure/models/sdkerror.py: - id: bd8616367442 - last_write_checksum: sha1:41c259fac1bd50d33f1a2fd64d1ed17fd8d0d075 - pristine_git_object: 216d7f8fca986ac29162a1a7cba8c18b7f73d012 - src/mistralai_azure/models/security.py: - id: 7b3bcb55164e - last_write_checksum: sha1:9cacce270a27809ded4ee91aecac4a681154f5f0 - pristine_git_object: c1ae83138b09eab742f755a0f11428cf8c0fd60d - src/mistralai_azure/models/systemmessage.py: - id: 2e15bb043753 - last_write_checksum: sha1:8ec96bfc0533414a698d92387021cac116eadade - pristine_git_object: f99bf4ffb112b068159a3b95bc99ec7ce91b3f7d - src/mistralai_azure/models/systemmessagecontentchunks.py: - id: b6d9a4838359 - last_write_checksum: sha1:1e3f4688317d10f207dd42ef39cf2ac8f6042e54 - pristine_git_object: 4615a16cf39496dffc4982c6f0552d8bf353e280 - src/mistralai_azure/models/textchunk.py: - id: c169e3f0ffc9 - last_write_checksum: sha1:6cb623bafd4005e527dca9b908bb9f4b371342da - pristine_git_object: 5845456e5ca3089bcb551112408a0de84c597a91 - 
src/mistralai_azure/models/thinkchunk.py: - id: b1b9aeee4dcf - last_write_checksum: sha1:d15b39ef3e12195183664c32854233b9410d565b - pristine_git_object: f53a9f1ad2e6f124a36c9fb9be65bc09dbfbff4b - src/mistralai_azure/models/tool.py: - id: 99c8106f5428 - last_write_checksum: sha1:6142383805723bbc2b22f1bfcc660288378d1e42 - pristine_git_object: c91deec28488062a0220af41492fdfb34330e7a4 - src/mistralai_azure/models/toolcall.py: - id: 3643db1054cd - last_write_checksum: sha1:9b095f1efe1ea554cfacbc4a8e0c59b1c57d7f32 - pristine_git_object: 44fe8ec86b8f31ad8ee9591ae49036e8caa9ac41 - src/mistralai_azure/models/toolchoice.py: - id: 669768b7cbda - last_write_checksum: sha1:1217d8186e64d16f4c369079c62e3ac466726c60 - pristine_git_object: 93b4b7fe72f05a2ece9fed08a83139f4510b2574 - src/mistralai_azure/models/toolchoiceenum.py: - id: 5f7df8457771 + src/mistralai/azure/client/models/responsevalidationerror.py: + id: afdb9463b434 + last_write_checksum: sha1:26f01befeb347a63928012e7eb36c95a8a392145 + pristine_git_object: cbdffcbba45a988805cdd52d111e77b0ca777dbf + src/mistralai/azure/client/models/sdkerror.py: + id: 4601c7297af7 + last_write_checksum: sha1:b54041f9751e1f2a38dd02a6f8eadb3907fa3df0 + pristine_git_object: a1e9aacaa2fcc839dcb2638788dd7c94298adee7 + src/mistralai/azure/client/models/security.py: + id: 4a2e4760ec08 + last_write_checksum: sha1:0cd2ae54cecd88cfd8d43e92c0d3da7efa48942c + pristine_git_object: 9b83ba98336090bed89fbeda40b4a07b212a1106 + src/mistralai/azure/client/models/systemmessage.py: + id: 8fa0dee9e4e1 + last_write_checksum: sha1:2b52c44b92a098b559ec8b7a80449532169cd317 + pristine_git_object: 38c280c809148e190e329619858718d132da6bc0 + src/mistralai/azure/client/models/systemmessagecontentchunks.py: + id: 5918e770869d + last_write_checksum: sha1:55529f2f29ba3087fbf117dbbe64e1dda92b2958 + pristine_git_object: 225f38b712f5f3c7abfd526cc8c0386687814f36 + src/mistralai/azure/client/models/textchunk.py: + id: 9c81c76a6325 + last_write_checksum: sha1:d1c9eaffeb80299f023351dc8d07eb53e49133f2 + pristine_git_object: e513c1434cc7a4766bb9ef039ad8eed2bf0c12ca + src/mistralai/azure/client/models/thinkchunk.py: + id: df6bbd55b3eb + last_write_checksum: sha1:ec9af4cb7faa6ba8ed033b37db1d1d5a1406ac3f + pristine_git_object: e769399fe6ba90ddb2503f8fadb4b6cebc7d6f85 + src/mistralai/azure/client/models/tool.py: + id: 4075ef72c086 + last_write_checksum: sha1:0c041eaa008ee1851e05bf90e57602c0338f362f + pristine_git_object: 169305bc4c538e88b1e0cf1120aa10e424118880 + src/mistralai/azure/client/models/toolcall.py: + id: c65e6f79e539 + last_write_checksum: sha1:dd2290e019322e9df73b119e054a1d738eb5f3ba + pristine_git_object: a589b1b38ef4caaba2753f8335228bc16cd68961 + src/mistralai/azure/client/models/toolchoice.py: + id: c25062b5de34 + last_write_checksum: sha1:db82f8d3f811461226cffbeacf2699103a5e0689 + pristine_git_object: 1f623222084f12eaa63f2cea656dc7da10b12a3a + src/mistralai/azure/client/models/toolchoiceenum.py: + id: cc06ba3a8d21 last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 - src/mistralai_azure/models/toolmessage.py: - id: 1d9845bf98b3 - last_write_checksum: sha1:52bd15280bcae27ec7ba6a1c64b15648de5b0868 - pristine_git_object: 4bc5c9a9b509fdb89a4cf5ce81231189bf46bab4 - src/mistralai_azure/models/tooltypes.py: - id: 34c499f03e21 - last_write_checksum: sha1:f060bd3aebf7d42c1066c543c47cfa020e61eb27 - pristine_git_object: 638890c589ee642fd0a43e00337505e53ea3ec3a - src/mistralai_azure/models/usageinfo.py: - id: 59a5033672bf - 
last_write_checksum: sha1:7d0e7a483331077309b78e035cab9d65e87d3f65 - pristine_git_object: bbe5cdfaae260df81e93da11d05a1ba55ecbe329 - src/mistralai_azure/models/usermessage.py: - id: c54119314021 - last_write_checksum: sha1:b45f38755a96b07100baf5149631f366009e701f - pristine_git_object: 85fedb4bd1bcf64f69e4ead5310cf3fb354a6e3c - src/mistralai_azure/models/validationerror.py: - id: 83cd7bfd6d92 - last_write_checksum: sha1:250ed57498dabd11c0e2b6d255969e0285bb4214 - pristine_git_object: 4caff4a6b74aeb322bf42cd2070b7bd576ca834a - src/mistralai_azure/ocr.py: - id: 77e2e0f594ad - last_write_checksum: sha1:7daae9b0c14093d6d0bc0258b0bce008cb845a1e - pristine_git_object: 31e27f6eaa6dcc2b8450656d4a59dd4a7a50a29a - src/mistralai_azure/py.typed: - id: 98df238e554c + src/mistralai/azure/client/models/toolmessage.py: + id: 84ac736fa955 + last_write_checksum: sha1:11841bba4b66179321a35ea1a4d4d3571fa997b7 + pristine_git_object: a73fd6bf8355043f1b40caf7e8b9ded90c1fda0f + src/mistralai/azure/client/models/tooltypes.py: + id: fa881b046d34 + last_write_checksum: sha1:cd28ddc02fff9a5abbb59c82fe9e0dcbdb9b6d2a + pristine_git_object: 1cce7446f2772b998208ea1c78c7969e3881d5d0 + src/mistralai/azure/client/models/usageinfo.py: + id: 3edc9c81b329 + last_write_checksum: sha1:0b2117285b13d846a25c6c59436c4628b9d84a03 + pristine_git_object: 19a6b09fb63a3732719c45f8dfca92cfc2c57353 + src/mistralai/azure/client/models/usermessage.py: + id: 3796508adc07 + last_write_checksum: sha1:f4baa9d8b8f99f715873cea83191baf055c3296a + pristine_git_object: 96439c64a979ac3edf8900d39154d706846a3a95 + src/mistralai/azure/client/models/validationerror.py: + id: f2b84813e2ea + last_write_checksum: sha1:f0f9706a5af2ac4f6b234e768fdd492bbdd8a18c + pristine_git_object: 817ecf7a56470369ccacd0f5e0bb739656a5f92c + src/mistralai/azure/client/ocr.py: + id: 5817c10c9297 + last_write_checksum: sha1:24fec22877024154ea417e31ea443b4795c443ba + pristine_git_object: 098e764b6580e35ad0e81242ca601ce821656ee9 + src/mistralai/azure/client/py.typed: + id: e88369f116d2 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 - src/mistralai_azure/sdkconfiguration.py: - id: 476a4f9e2f3e - last_write_checksum: sha1:6b117889b46a546be6e949c1bf843834ceff7417 - pristine_git_object: 51289cf05559ba32dd17e45fab78df4a8697063f - src/mistralai_azure/types/__init__.py: - id: d761bb7a67a5 + src/mistralai/azure/client/sdkconfiguration.py: + id: 602f74633eed + last_write_checksum: sha1:163fe779949725d81181f39b70d6922fc2cb8099 + pristine_git_object: 919225f9bf2e4315f879f0da6c7f8b3e6157bd58 + src/mistralai/azure/client/types/__init__.py: + id: f79033f78412 last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c - src/mistralai_azure/types/basemodel.py: - id: 68c97875efb7 + src/mistralai/azure/client/types/basemodel.py: + id: fd244927c80c last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee - src/mistralai_azure/utils/__init__.py: - id: 3c68abef839b + src/mistralai/azure/client/utils/__init__.py: + id: 26f1a707325b last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce - src/mistralai_azure/utils/annotations.py: - id: 476ee839718f + src/mistralai/azure/client/utils/annotations.py: + id: bb1f6c189fdb last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc pristine_git_object: 
12e0aa4f1151bb52474cc02e88397329b90703f6 - src/mistralai_azure/utils/datetimes.py: - id: e9faf3b28c48 + src/mistralai/azure/client/utils/datetimes.py: + id: 2b7db09ee0ab last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 - src/mistralai_azure/utils/enums.py: - id: 4d10693bf655 + src/mistralai/azure/client/utils/enums.py: + id: ffbdb1917a68 last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 - src/mistralai_azure/utils/eventstreaming.py: - id: 5f5e90529fd7 + src/mistralai/azure/client/utils/eventstreaming.py: + id: bdc37b70360c last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc - src/mistralai_azure/utils/forms.py: - id: 91c3fe9ba311 + src/mistralai/azure/client/utils/forms.py: + id: 51696122c557 last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 - src/mistralai_azure/utils/headers.py: - id: d37ef2f03e41 + src/mistralai/azure/client/utils/headers.py: + id: e42840c8cb13 last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a - src/mistralai_azure/utils/logger.py: - id: 9122a46617cc + src/mistralai/azure/client/utils/logger.py: + id: 9db88755a137 last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 - src/mistralai_azure/utils/metadata.py: - id: 2d93fa8523eb + src/mistralai/azure/client/utils/metadata.py: + id: 44f85bd3b2e2 last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d - src/mistralai_azure/utils/queryparams.py: - id: dfd31ba97c2b + src/mistralai/azure/client/utils/queryparams.py: + id: ec1c03114156 last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 - src/mistralai_azure/utils/requestbodies.py: - id: c91db641d5b9 + src/mistralai/azure/client/utils/requestbodies.py: + id: 1030c47d624d last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c - src/mistralai_azure/utils/retries.py: - id: 6f0cd9f6169d + src/mistralai/azure/client/utils/retries.py: + id: d50ed6e400b2 last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 - src/mistralai_azure/utils/security.py: - id: "270040388028" + src/mistralai/azure/client/utils/security.py: + id: 1d35741ce5f1 last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 - src/mistralai_azure/utils/serializers.py: - id: 595ddab03803 + src/mistralai/azure/client/utils/serializers.py: + id: a1f26d73c3ad last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 - src/mistralai_azure/utils/unmarshal_json_response.py: - id: bde89a892417 - last_write_checksum: sha1:d2ce9e3478b38e54e4bb3a43610ee0bab00c2e27 - pristine_git_object: f5813119b559442ee85c0b310765db3866bfa09d - src/mistralai_azure/utils/url.py: - id: 080c62716b06 + src/mistralai/azure/client/utils/unmarshal_json_response.py: + id: 947f4fc4db62 + last_write_checksum: sha1:99bd357d24d2236e3974630d9bd18bae22610cbc + 
pristine_git_object: 5317ac87097ccb35628202cf7fc5cb21e186855f + src/mistralai/azure/client/utils/url.py: + id: 4976c88d0e3b last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 - src/mistralai_azure/utils/values.py: - id: 640889083cda + src/mistralai/azure/client/utils/values.py: + id: 3974a1553447 last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "azureai", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "422": application/json: {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "azureai", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -715,7 +688,7 @@ examples: application/json: {} userExample: requestBody: - application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + application/json: {"model": "CX-9", "document": {"type": "document_url", "document_url": "https://upset-labourer.net/"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} responses: "200": application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. 
Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. 
Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. (2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | (0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | 
(0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) | (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) | (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 
30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml similarity index 78% rename from packages/mistralai_azure/.speakeasy/gen.yaml rename to packages/azure/.speakeasy/gen.yaml index e2be4d84..729cdfcf 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/azure/.speakeasy/gen.yaml @@ -8,11 +8,13 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false + nameResolutionFeb2025: true parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false - sharedErrorComponentsApr2025: false + securityFeb2025: true + sharedErrorComponentsApr2025: true + methodSignaturesApr2024: true + sharedNestedComponentsJan2026: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -26,7 +28,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.8.1 + version: 2.0.0a4 additionalDependencies: dev: pytest: ^8.2.2 @@ -45,10 +47,13 @@ python: enableCustomCodeRegions: false enumFormat: union fixFlags: - responseRequiredSep2024: false + responseRequiredSep2024: true + flatAdditionalProperties: true flattenGlobalSecurity: true flattenRequests: true flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only imports: option: openapi paths: @@ -62,12 +67,12 @@ python: license: "" maxMethodParams: 15 methodArguments: infer-optional-args - moduleName: "" + moduleName: mistralai.azure.client multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv - packageName: mistralai_azure - preApplyUnionDiscriminators: false + packageName: mistralai-azure + preApplyUnionDiscriminators: true pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat diff --git a/packages/mistralai_azure/CONTRIBUTING.md b/packages/azure/CONTRIBUTING.md similarity index 100% rename from packages/mistralai_azure/CONTRIBUTING.md rename to packages/azure/CONTRIBUTING.md diff --git a/packages/mistralai_azure/README.md b/packages/azure/README.md 
similarity index 57% rename from packages/mistralai_azure/README.md rename to packages/azure/README.md index f869b90a..6eff040f 100644 --- a/packages/mistralai_azure/README.md +++ b/packages/azure/README.md @@ -14,7 +14,7 @@ uv add mistralai **Prerequisites** -Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +Before you begin, ensure you have `AZURE_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). @@ -24,58 +24,73 @@ See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.a This example shows how to create chat completions. +The SDK automatically injects the `api-version` query parameter. + ```python # Synchronous Example -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - res = s.chat.complete( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="azureai" + ], + model=AZURE_MODEL, ) if res is not None: # handle response - pass + print(res.choices[0].message.content) ```
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio -from mistralai_azure import MistralAzure import os +from mistralai.azure.client import MistralAzure + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") async def main(): + # The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) res = await s.chat.complete_async( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="azureai" + ], + model=AZURE_MODEL, ) if res is not None: # handle response - pass + print(res.choices[0].message.content) asyncio.run(main()) ``` @@ -87,7 +102,7 @@ asyncio.run(main()) ### [chat](docs/sdks/chat/README.md) * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -* [create](docs/sdks/chat/README.md#create) - Chat Completion +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion @@ -100,23 +115,29 @@ terminate when the server no longer has any events to send and closes the underlying connection. ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="azureai" + ], + model=AZURE_MODEL, ) if res is not None: @@ -137,23 +158,36 @@ Some of the endpoints in this SDK support retries. If you use the SDK without an To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: ```python -from mistralai_azure import MistralAzure -from mistralazure.utils import BackoffStrategy, RetryConfig +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.utils import BackoffStrategy, RetryConfig import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? 
Answer in one short sentence.", - "role": "user", - }, -], model="azureai", - RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + retries=RetryConfig( + "backoff", + BackoffStrategy(1, 50, 1.1, 100), + False + ), +) if res is not None: for event in res: @@ -164,23 +198,32 @@ if res is not None: If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: ```python -from mistralai_azure import MistralAzure -from mistralazure.utils import BackoffStrategy, RetryConfig +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.utils import BackoffStrategy, RetryConfig import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ["AZURE_API_VERSION"] + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") ) - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, +) if res is not None: for event in res: @@ -193,7 +236,7 @@ if res is not None: ## Error Handling -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. | Error Object | Status Code | Content Type | | -------------------------- | ----------- | ---------------- | @@ -203,22 +246,33 @@ Handling errors in this SDK should largely match your expectations. All operati ### Example ```python -from mistralai_azure import MistralAzure, models +from mistralai.azure.client import MistralAzure +from mistralai.azure.client import models import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) res = None try: - res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") + res = s.chat.complete( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + ) except models.HTTPValidationError as e: # handle exception @@ -237,62 +291,28 @@ if res is not None: ## Server Selection -### Select Server by Name - -You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: - -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | - -#### Example - -```python -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - server="prod", - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - ### Override Server URL Per-Client -The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: +For Azure, you must provide your Azure AI Foundry endpoint via `server_url`. The SDK automatically injects the `api-version` query parameter: ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os s = MistralAzure( - server_url="https://api.mistral.ai", - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), ) - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model=os.environ["AZURE_MODEL"], +) if res is not None: for event in res: @@ -311,17 +331,24 @@ This allows you to wrap the client with your own custom logic, such as adding cu For example, you could specify a header for every request that this sdk makes as follows: ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import httpx +import os http_client = httpx.Client(headers={"x-custom-header": "someValue"}) -s = MistralAzure(client=http_client) +s = MistralAzure( + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), + client=http_client, +) ``` or you could wrap the client with your own custom logic: ```python -from mistralai_azure import MistralAzure -from mistralai_azure.httpclient import AsyncHttpClient +from typing import Any, Optional, Union +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.httpclient import AsyncHttpClient import httpx class CustomClient(AsyncHttpClient): @@ -379,7 +406,11 @@ class CustomClient(AsyncHttpClient): extensions=extensions, ) -s = MistralAzure(async_client=CustomClient(httpx.AsyncClient())) +s = MistralAzure( + api_key="", + server_url="", + async_client=CustomClient(httpx.AsyncClient()), +) ``` @@ -394,23 +425,26 @@ This SDK supports the following security scheme globally: | --------- | ---- | ----------- | | `api_key` | http | HTTP Bearer | -To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: +To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. You must also provide `server_url` pointing to your Azure AI Foundry endpoint. The SDK automatically injects the `api-version` query parameter: ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), ) - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=os.environ["AZURE_MODEL"], +) if res is not None: for event in res: @@ -426,5 +460,5 @@ if res is not None: ## Contributions -While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. 
diff --git a/packages/mistralai_azure/RELEASES.md b/packages/azure/RELEASES.md similarity index 100% rename from packages/mistralai_azure/RELEASES.md rename to packages/azure/RELEASES.md diff --git a/packages/azure/USAGE.md b/packages/azure/USAGE.md new file mode 100644 index 00000000..a4bc5147 --- /dev/null +++ b/packages/azure/USAGE.md @@ -0,0 +1,70 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically injects the `api-version` query parameter. + +```python +# Synchronous Example +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model=AZURE_MODEL) + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.azure.client import MistralAzure + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +async def main(): + # The SDK automatically injects api-version as a query parameter + s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model=AZURE_MODEL) + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + diff --git a/packages/mistralai_azure/docs/models/arguments.md b/packages/azure/docs/models/arguments.md similarity index 100% rename from packages/mistralai_azure/docs/models/arguments.md rename to packages/azure/docs/models/arguments.md diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/azure/docs/models/assistantmessage.md similarity index 95% rename from packages/mistralai_azure/docs/models/assistantmessage.md rename to packages/azure/docs/models/assistantmessage.md index 3d0bd90b..9ef63837 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/azure/docs/models/assistantmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | | `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagecontent.md b/packages/azure/docs/models/assistantmessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/assistantmessagecontent.md rename to packages/azure/docs/models/assistantmessagecontent.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoice.md b/packages/azure/docs/models/chatcompletionchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionchoice.md rename to packages/azure/docs/models/chatcompletionchoice.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md b/packages/azure/docs/models/chatcompletionchoicefinishreason.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md rename to packages/azure/docs/models/chatcompletionchoicefinishreason.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/azure/docs/models/chatcompletionrequest.md similarity index 99% rename from packages/mistralai_azure/docs/models/chatcompletionrequest.md rename to packages/azure/docs/models/chatcompletionrequest.md index 104a1f96..3b0f7270 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/azure/docs/models/chatcompletionrequest.md @@ -13,7 +13,7 @@ | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/packages/mistralai_azure/docs/models/messages.md b/packages/azure/docs/models/chatcompletionrequestmessage.md similarity index 92% rename from packages/mistralai_azure/docs/models/messages.md rename to packages/azure/docs/models/chatcompletionrequestmessage.md index 1d394500..91e9e062 100644 --- a/packages/mistralai_azure/docs/models/messages.md +++ b/packages/azure/docs/models/chatcompletionrequestmessage.md @@ -1,4 +1,4 @@ -# Messages +# ChatCompletionRequestMessage ## Supported Types diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md b/packages/azure/docs/models/chatcompletionrequeststop.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionrequeststop.md rename to packages/azure/docs/models/chatcompletionrequeststop.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/azure/docs/models/chatcompletionrequesttoolchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md rename to packages/azure/docs/models/chatcompletionrequesttoolchoice.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionresponse.md b/packages/azure/docs/models/chatcompletionresponse.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionresponse.md rename to packages/azure/docs/models/chatcompletionresponse.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/azure/docs/models/chatcompletionstreamrequest.md similarity index 99% rename from packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md rename to packages/azure/docs/models/chatcompletionstreamrequest.md index 85f237b4..f78156a6 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/azure/docs/models/chatcompletionstreamrequest.md @@ -10,10 +10,10 @@ | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md b/packages/azure/docs/models/chatcompletionstreamrequestmessage.md similarity index 91% rename from packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md rename to packages/azure/docs/models/chatcompletionstreamrequestmessage.md index bc7708a6..2e4e93ac 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md +++ b/packages/azure/docs/models/chatcompletionstreamrequestmessage.md @@ -1,4 +1,4 @@ -# ChatCompletionRequestMessages +# ChatCompletionStreamRequestMessage ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/stop.md b/packages/azure/docs/models/chatcompletionstreamrequeststop.md similarity index 88% rename from packages/mistralai_gcp/docs/models/stop.md rename to packages/azure/docs/models/chatcompletionstreamrequeststop.md index ba40ca83..a48460a9 100644 --- a/packages/mistralai_gcp/docs/models/stop.md +++ b/packages/azure/docs/models/chatcompletionstreamrequeststop.md @@ -1,4 +1,4 @@ -# Stop +# ChatCompletionStreamRequestStop Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md rename to packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md diff --git a/packages/mistralai_azure/docs/models/completionchunk.md b/packages/azure/docs/models/completionchunk.md similarity index 100% rename from packages/mistralai_azure/docs/models/completionchunk.md rename to packages/azure/docs/models/completionchunk.md diff --git a/packages/mistralai_azure/docs/models/completionevent.md b/packages/azure/docs/models/completionevent.md similarity index 100% rename from packages/mistralai_azure/docs/models/completionevent.md rename to packages/azure/docs/models/completionevent.md diff --git a/packages/azure/docs/models/completionresponsestreamchoice.md b/packages/azure/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/packages/azure/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/finishreason.md b/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md similarity index 81% rename from packages/mistralai_gcp/docs/models/finishreason.md rename to packages/azure/docs/models/completionresponsestreamchoicefinishreason.md index 45a5aedb..0fece473 100644 --- a/packages/mistralai_gcp/docs/models/finishreason.md +++ b/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md @@ -1,4 +1,4 @@ -# FinishReason +# CompletionResponseStreamChoiceFinishReason ## Values diff --git a/packages/mistralai_azure/docs/models/contentchunk.md b/packages/azure/docs/models/contentchunk.md similarity index 100% rename from packages/mistralai_azure/docs/models/contentchunk.md rename to packages/azure/docs/models/contentchunk.md diff --git a/packages/azure/docs/models/deltamessage.md b/packages/azure/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/packages/azure/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/content.md b/packages/azure/docs/models/deltamessagecontent.md similarity index 89% rename from packages/mistralai_gcp/docs/models/content.md rename to packages/azure/docs/models/deltamessagecontent.md index a833dc2c..8142772d 100644 --- a/packages/mistralai_gcp/docs/models/content.md +++ b/packages/azure/docs/models/deltamessagecontent.md @@ -1,4 +1,4 @@ -# Content +# DeltaMessageContent ## Supported Types diff --git a/packages/mistralai_azure/docs/models/document.md b/packages/azure/docs/models/document.md similarity index 100% rename from packages/mistralai_azure/docs/models/document.md rename to packages/azure/docs/models/document.md diff --git a/packages/azure/docs/models/documenturlchunk.md b/packages/azure/docs/models/documenturlchunk.md new file mode 100644 index 00000000..9dbfbe50 --- /dev/null +++ b/packages/azure/docs/models/documenturlchunk.md @@ -0,0 +1,10 @@ +# DocumentURLChunk + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/filechunk.md b/packages/azure/docs/models/filechunk.md similarity index 100% rename from packages/mistralai_azure/docs/models/filechunk.md rename to packages/azure/docs/models/filechunk.md diff --git a/packages/mistralai_azure/docs/models/format_.md b/packages/azure/docs/models/format_.md similarity index 100% rename from packages/mistralai_azure/docs/models/format_.md rename to packages/azure/docs/models/format_.md diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/azure/docs/models/function.md similarity index 100% rename from packages/mistralai_azure/docs/models/function.md rename to packages/azure/docs/models/function.md diff --git a/packages/mistralai_azure/docs/models/functioncall.md b/packages/azure/docs/models/functioncall.md similarity index 100% rename from packages/mistralai_azure/docs/models/functioncall.md rename to packages/azure/docs/models/functioncall.md diff --git a/packages/mistralai_azure/docs/models/functionname.md b/packages/azure/docs/models/functionname.md similarity index 100% rename from packages/mistralai_azure/docs/models/functionname.md rename to packages/azure/docs/models/functionname.md diff --git a/packages/mistralai_azure/docs/models/httpvalidationerror.md b/packages/azure/docs/models/httpvalidationerror.md similarity index 100% rename from packages/mistralai_azure/docs/models/httpvalidationerror.md rename to packages/azure/docs/models/httpvalidationerror.md diff --git a/packages/azure/docs/models/imagedetail.md b/packages/azure/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/packages/azure/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## 
Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurl.md b/packages/azure/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- /dev/null +++ b/packages/azure/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurlchunk.md b/packages/azure/docs/models/imageurlchunk.md new file mode 100644 index 00000000..db0c53d2 --- /dev/null +++ b/packages/azure/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Optional[Literal["image_url"]]* | :heavy_minus_sign: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md b/packages/azure/docs/models/imageurlunion.md similarity index 86% rename from packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md rename to packages/azure/docs/models/imageurlunion.md index 76738908..db97130f 100644 --- a/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md +++ b/packages/azure/docs/models/imageurlunion.md @@ -1,4 +1,4 @@ -# ImageURLChunkImageURL +# ImageURLUnion ## Supported Types diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/azure/docs/models/jsonschema.md similarity index 100% rename from packages/mistralai_azure/docs/models/jsonschema.md rename to packages/azure/docs/models/jsonschema.md diff --git a/packages/mistralai_azure/docs/models/loc.md b/packages/azure/docs/models/loc.md similarity index 100% rename from packages/mistralai_azure/docs/models/loc.md rename to packages/azure/docs/models/loc.md diff --git a/packages/mistralai_azure/docs/models/mistralpromptmode.md b/packages/azure/docs/models/mistralpromptmode.md similarity index 100% rename from packages/mistralai_azure/docs/models/mistralpromptmode.md rename to packages/azure/docs/models/mistralpromptmode.md diff --git a/packages/mistralai_azure/docs/models/ocrimageobject.md b/packages/azure/docs/models/ocrimageobject.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrimageobject.md rename to packages/azure/docs/models/ocrimageobject.md diff --git a/packages/mistralai_azure/docs/models/ocrpagedimensions.md b/packages/azure/docs/models/ocrpagedimensions.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrpagedimensions.md rename to packages/azure/docs/models/ocrpagedimensions.md diff --git a/packages/mistralai_azure/docs/models/ocrpageobject.md b/packages/azure/docs/models/ocrpageobject.md similarity index 100% 
rename from packages/mistralai_azure/docs/models/ocrpageobject.md rename to packages/azure/docs/models/ocrpageobject.md diff --git a/packages/mistralai_azure/docs/models/ocrrequest.md b/packages/azure/docs/models/ocrrequest.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrrequest.md rename to packages/azure/docs/models/ocrrequest.md diff --git a/packages/mistralai_azure/docs/models/ocrresponse.md b/packages/azure/docs/models/ocrresponse.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrresponse.md rename to packages/azure/docs/models/ocrresponse.md diff --git a/packages/mistralai_azure/docs/models/ocrtableobject.md b/packages/azure/docs/models/ocrtableobject.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrtableobject.md rename to packages/azure/docs/models/ocrtableobject.md diff --git a/packages/mistralai_azure/docs/models/ocrusageinfo.md b/packages/azure/docs/models/ocrusageinfo.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrusageinfo.md rename to packages/azure/docs/models/ocrusageinfo.md diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/azure/docs/models/prediction.md similarity index 100% rename from packages/mistralai_azure/docs/models/prediction.md rename to packages/azure/docs/models/prediction.md diff --git a/packages/azure/docs/models/referencechunk.md b/packages/azure/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/packages/azure/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/azure/docs/models/responseformat.md similarity index 100% rename from packages/mistralai_azure/docs/models/responseformat.md rename to packages/azure/docs/models/responseformat.md diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/azure/docs/models/responseformats.md similarity index 100% rename from packages/mistralai_azure/docs/models/responseformats.md rename to packages/azure/docs/models/responseformats.md diff --git a/packages/mistralai_azure/docs/models/security.md b/packages/azure/docs/models/security.md similarity index 100% rename from packages/mistralai_azure/docs/models/security.md rename to packages/azure/docs/models/security.md diff --git a/packages/mistralai_gcp/docs/models/systemmessage.md b/packages/azure/docs/models/systemmessage.md similarity index 88% rename from packages/mistralai_gcp/docs/models/systemmessage.md rename to packages/azure/docs/models/systemmessage.md index 0dba71c0..10bda10f 100644 --- a/packages/mistralai_gcp/docs/models/systemmessage.md +++ b/packages/azure/docs/models/systemmessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | 
:heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/systemmessagecontent.md b/packages/azure/docs/models/systemmessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/systemmessagecontent.md rename to packages/azure/docs/models/systemmessagecontent.md diff --git a/packages/mistralai_azure/docs/models/systemmessagecontentchunks.md b/packages/azure/docs/models/systemmessagecontentchunks.md similarity index 100% rename from packages/mistralai_azure/docs/models/systemmessagecontentchunks.md rename to packages/azure/docs/models/systemmessagecontentchunks.md diff --git a/packages/mistralai_azure/docs/models/tableformat.md b/packages/azure/docs/models/tableformat.md similarity index 100% rename from packages/mistralai_azure/docs/models/tableformat.md rename to packages/azure/docs/models/tableformat.md diff --git a/packages/azure/docs/models/textchunk.md b/packages/azure/docs/models/textchunk.md new file mode 100644 index 00000000..b266619d --- /dev/null +++ b/packages/azure/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/thinkchunk.md b/packages/azure/docs/models/thinkchunk.md similarity index 91% rename from packages/mistralai_azure/docs/models/thinkchunk.md rename to packages/azure/docs/models/thinkchunk.md index 66b2e0cd..b07f598e 100644 --- a/packages/mistralai_azure/docs/models/thinkchunk.md +++ b/packages/azure/docs/models/thinkchunk.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | | `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | -| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | -| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/thinking.md b/packages/azure/docs/models/thinking.md similarity index 100% rename from packages/mistralai_azure/docs/models/thinking.md rename to packages/azure/docs/models/thinking.md diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/azure/docs/models/tool.md similarity index 100% rename from packages/mistralai_azure/docs/models/tool.md rename to packages/azure/docs/models/tool.md diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/azure/docs/models/toolcall.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolcall.md rename to packages/azure/docs/models/toolcall.md diff --git a/packages/mistralai_azure/docs/models/toolchoice.md b/packages/azure/docs/models/toolchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolchoice.md rename to packages/azure/docs/models/toolchoice.md diff --git a/packages/mistralai_azure/docs/models/toolchoiceenum.md b/packages/azure/docs/models/toolchoiceenum.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolchoiceenum.md rename to packages/azure/docs/models/toolchoiceenum.md diff --git a/packages/mistralai_gcp/docs/models/toolmessage.md b/packages/azure/docs/models/toolmessage.md similarity index 92% rename from packages/mistralai_gcp/docs/models/toolmessage.md rename to packages/azure/docs/models/toolmessage.md index a54f4933..7201481e 100644 --- a/packages/mistralai_gcp/docs/models/toolmessage.md +++ b/packages/azure/docs/models/toolmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | | `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | | `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagecontent.md b/packages/azure/docs/models/toolmessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolmessagecontent.md rename to packages/azure/docs/models/toolmessagecontent.md diff --git a/packages/mistralai_azure/docs/models/tooltypes.md b/packages/azure/docs/models/tooltypes.md similarity index 100% rename from packages/mistralai_azure/docs/models/tooltypes.md rename to packages/azure/docs/models/tooltypes.md diff --git a/packages/mistralai_azure/docs/models/usageinfo.md b/packages/azure/docs/models/usageinfo.md similarity index 100% rename from packages/mistralai_azure/docs/models/usageinfo.md rename to packages/azure/docs/models/usageinfo.md diff --git a/packages/mistralai_gcp/docs/models/usermessage.md b/packages/azure/docs/models/usermessage.md similarity index 89% rename from packages/mistralai_gcp/docs/models/usermessage.md rename to packages/azure/docs/models/usermessage.md index 63b01310..e7a932ed 100644 --- 
a/packages/mistralai_gcp/docs/models/usermessage.md +++ b/packages/azure/docs/models/usermessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagecontent.md b/packages/azure/docs/models/usermessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/usermessagecontent.md rename to packages/azure/docs/models/usermessagecontent.md diff --git a/packages/mistralai_azure/docs/models/utils/retryconfig.md b/packages/azure/docs/models/utils/retryconfig.md similarity index 100% rename from packages/mistralai_azure/docs/models/utils/retryconfig.md rename to packages/azure/docs/models/utils/retryconfig.md diff --git a/packages/mistralai_azure/docs/models/validationerror.md b/packages/azure/docs/models/validationerror.md similarity index 100% rename from packages/mistralai_azure/docs/models/validationerror.md rename to packages/azure/docs/models/validationerror.md diff --git a/packages/mistralai_azure/docs/sdks/chat/README.md b/packages/azure/docs/sdks/chat/README.md similarity index 95% rename from packages/mistralai_azure/docs/sdks/chat/README.md rename to packages/azure/docs/sdks/chat/README.md index 26d20bb4..560ffa83 100644 --- a/packages/mistralai_azure/docs/sdks/chat/README.md +++ b/packages/azure/docs/sdks/chat/README.md @@ -8,7 +8,7 @@ Chat Completion API. ### Available Operations * [stream](#stream) - Stream chat completion -* [create](#create) - Chat Completion +* [complete](#complete) - Chat Completion ## stream @@ -17,21 +17,27 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - res = s.chat.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, -], model="azureai") +], model=AZURE_MODEL) if res is not None: for event in res: @@ -69,32 +75,38 @@ if res is not None: | --------------- | ----------- | ------------ | | models.SDKError | 4xx-5xx | */* | -## create +## complete Chat Completion ### Example Usage ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - res = s.chat.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, -], model="azureai") +], model=AZURE_MODEL) if res is not None: # handle response - pass + print(res.choices[0].message.content) ``` diff --git a/packages/mistralai_azure/docs/sdks/mistralazure/README.md b/packages/azure/docs/sdks/mistralazure/README.md similarity index 100% rename from packages/mistralai_azure/docs/sdks/mistralazure/README.md rename to packages/azure/docs/sdks/mistralazure/README.md diff --git a/packages/mistralai_azure/py.typed b/packages/azure/py.typed similarity index 100% rename from packages/mistralai_azure/py.typed rename to packages/azure/py.typed diff --git a/packages/mistralai_azure/pylintrc b/packages/azure/pylintrc similarity index 100% rename from packages/mistralai_azure/pylintrc rename to packages/azure/pylintrc diff --git a/packages/mistralai_azure/pyproject.toml b/packages/azure/pyproject.toml similarity index 79% rename from packages/mistralai_azure/pyproject.toml rename to packages/azure/pyproject.toml index d129a290..3b9aa829 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/azure/pyproject.toml @@ -1,6 +1,6 @@ [project] -name = "mistralai_azure" -version = "1.8.0" +name = "mistralai-azure" +version = "2.0.0a4" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -21,20 +21,20 @@ dev = [ ] [tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai_azure/py.typed"] +"*" = ["py.typed", "src/mistralai/azure/client/py.typed"] [tool.hatch.build.targets.sdist] -include = ["src/mistralai_azure"] +include = ["src/mistralai"] [tool.hatch.build.targets.sdist.force-include] "py.typed" = "py.typed" -"src/mistralai_azure/py.typed" = "src/mistralai_azure/py.typed" +"src/mistralai/azure/client/py.typed" = "src/mistralai/azure/client/py.typed" [tool.hatch.build.targets.wheel] -include = ["src/mistralai_azure"] +include = ["src/mistralai"] [tool.hatch.build.targets.wheel.sources] -"src/mistralai_azure" = "mistralai_azure" +"src" = "" [virtualenvs] in-project = true @@ -48,6 +48,7 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +namespace_packages = true explicit_package_bases = true mypy_path = "src" diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/azure/scripts/prepare_readme.py similarity index 96% rename from packages/mistralai_gcp/scripts/prepare_readme.py rename to packages/azure/scripts/prepare_readme.py index 6c4b9932..2b2577ea 100644 --- a/packages/mistralai_gcp/scripts/prepare_readme.py +++ b/packages/azure/scripts/prepare_readme.py @@ -10,7 +10,7 @@ GITHUB_URL = ( GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL ) - REPO_SUBDIR = "packages/mistralai_gcp" + REPO_SUBDIR = "packages/azure" # Ensure the subdirectory has a trailing slash if not REPO_SUBDIR.endswith("/"): REPO_SUBDIR += "/" diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/azure/scripts/publish.sh similarity index 100% rename from packages/mistralai_azure/scripts/publish.sh rename to packages/azure/scripts/publish.sh diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/azure/src/mistralai/azure/client/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/__init__.py rename to packages/azure/src/mistralai/azure/client/__init__.py diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py b/packages/azure/src/mistralai/azure/client/_hooks/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py rename to packages/azure/src/mistralai/azure/client/_hooks/__init__.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py b/packages/azure/src/mistralai/azure/client/_hooks/registration.py similarity index 70% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py rename to packages/azure/src/mistralai/azure/client/_hooks/registration.py index 304edfa2..d5a49cc3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py +++ b/packages/azure/src/mistralai/azure/client/_hooks/registration.py @@ -1,15 +1,12 @@ -from .custom_user_agent import CustomUserAgentHook from .types import Hooks + # This file is only ever generated once on the first generation and then is free to be modified. # Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them # in this file or in separate files in the hooks folder. 
-def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument +def init_hooks(_hooks: Hooks) -> None: """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - hooks.register_before_request_hook(CustomUserAgentHook()) + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance""" diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py b/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py rename to packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py index 37ff4e9f..2080681b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py +++ b/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py @@ -13,7 +13,7 @@ ) from .registration import init_hooks from typing import List, Optional, Tuple -from mistralai_azure.httpclient import HttpClient +from mistralai.azure.client.httpclient import HttpClient class SDKHooks(Hooks): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/azure/src/mistralai/azure/client/_hooks/types.py similarity index 95% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py rename to packages/azure/src/mistralai/azure/client/_hooks/types.py index f8088f4c..3e4e3955 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/azure/src/mistralai/azure/client/_hooks/types.py @@ -2,8 +2,8 @@ from abc import ABC, abstractmethod import httpx -from mistralai_gcp.httpclient import HttpClient -from mistralai_gcp.sdkconfiguration import SDKConfiguration +from mistralai.azure.client.httpclient import HttpClient +from mistralai.azure.client.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/azure/src/mistralai/azure/client/_version.py similarity index 69% rename from packages/mistralai_azure/src/mistralai_azure/_version.py rename to packages/azure/src/mistralai/azure/client/_version.py index 79277f9a..4448d2a0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -2,11 +2,11 @@ import importlib.metadata -__title__: str = "mistralai_azure" -__version__: str = "1.8.1" +__title__: str = "mistralai-azure" +__version__: str = "2.0.0a4" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.8.1 2.794.1 1.0.0 mistralai_azure" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a4 2.794.1 1.0.0 mistralai-azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/azure/src/mistralai/azure/client/basesdk.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/basesdk.py rename to packages/azure/src/mistralai/azure/client/basesdk.py index 89f7dc49..b0391ac0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/azure/src/mistralai/azure/client/basesdk.py @@ -2,13 +2,17 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai_azure import models, utils -from mistralai_azure._hooks import ( +from mistralai.azure.client import 
models, utils +from mistralai.azure.client._hooks import ( AfterErrorContext, AfterSuccessContext, BeforeRequestContext, ) -from mistralai_azure.utils import RetryConfig, SerializedRequestBody, get_body_content +from mistralai.azure.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, +) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/azure/src/mistralai/azure/client/chat.py similarity index 96% rename from packages/mistralai_azure/src/mistralai_azure/chat.py rename to packages/azure/src/mistralai/azure/client/chat.py index 10bb247f..3348bf47 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/azure/src/mistralai/azure/client/chat.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai_azure import models, utils -from mistralai_azure._hooks import HookContext -from mistralai_azure.types import OptionalNullable, UNSET -from mistralai_azure.utils import eventstreaming -from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response +from mistralai.azure.client import models, utils +from mistralai.azure.client._hooks import HookContext +from mistralai.azure.client.types import OptionalNullable, UNSET +from mistralai.azure.client.utils import eventstreaming +from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Dict, List, Mapping, Optional, Union @@ -15,13 +15,21 @@ class Chat(BaseSDK): def stream( self, *, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ @@ -97,7 +105,9 @@ def stream( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -185,13 +195,21 @@ def stream( async def stream_async( self, *, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, 
response_format: Optional[ @@ -267,7 +285,9 @@ async def stream_async( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -356,8 +376,8 @@ def complete( self, *, messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, @@ -444,7 +464,7 @@ def complete( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] + messages, List[models.ChatCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -526,8 +546,8 @@ async def complete_async( self, *, messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, @@ -614,7 +634,7 @@ async def complete_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] + messages, List[models.ChatCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/azure/src/mistralai/azure/client/httpclient.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/httpclient.py rename to packages/azure/src/mistralai/azure/client/httpclient.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/azure/src/mistralai/azure/client/models/__init__.py similarity index 85% rename from packages/mistralai_azure/src/mistralai_azure/models/__init__.py rename to packages/azure/src/mistralai/azure/client/models/__init__.py index 9baa3ff1..51db6a38 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/azure/src/mistralai/azure/client/models/__init__.py @@ -11,7 +11,6 @@ AssistantMessage, AssistantMessageContent, AssistantMessageContentTypedDict, - AssistantMessageRole, AssistantMessageTypedDict, ) from .chatcompletionchoice import ( @@ -21,8 +20,8 @@ ) from .chatcompletionrequest import ( ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, @@ -35,33 +34,29 @@ ) from .chatcompletionstreamrequest import ( ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, ) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import 
CompletionEvent, CompletionEventTypedDict from .completionresponsestreamchoice import ( CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, - FinishReason, ) from .contentchunk import ContentChunk, ContentChunkTypedDict from .deltamessage import ( - Content, - ContentTypedDict, DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, DeltaMessageTypedDict, ) - from .documenturlchunk import ( - DocumentURLChunk, - DocumentURLChunkType, - DocumentURLChunkTypedDict, - ) + from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( @@ -72,13 +67,13 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagedetail import ImageDetail from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode @@ -97,18 +92,13 @@ from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict from .prediction import Prediction, PredictionTypedDict - from .referencechunk import ( - ReferenceChunk, - ReferenceChunkType, - ReferenceChunkTypedDict, - ) + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .responsevalidationerror import ResponseValidationError from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( - Role, SystemMessage, SystemMessageContent, SystemMessageContentTypedDict, @@ -118,14 +108,8 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) - from .textchunk import TextChunk, TextChunkTypedDict, Type - from .thinkchunk import ( - ThinkChunk, - ThinkChunkType, - ThinkChunkTypedDict, - Thinking, - ThinkingTypedDict, - ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict @@ -134,7 +118,6 @@ ToolMessage, ToolMessageContent, ToolMessageContentTypedDict, - ToolMessageRole, ToolMessageTypedDict, ) from .tooltypes import ToolTypes @@ -143,7 +126,6 @@ UserMessage, UserMessageContent, UserMessageContentTypedDict, - UserMessageRole, UserMessageTypedDict, ) from .validationerror import ( @@ -159,14 +141,13 @@ "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentTypedDict", - "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", - "ChatCompletionRequestMessages", - "ChatCompletionRequestMessagesTypedDict", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", @@ -175,6 +156,10 @@ "ChatCompletionResponse", "ChatCompletionResponseTypedDict", 
"ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", @@ -183,21 +168,20 @@ "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", - "Content", "ContentChunk", "ContentChunkTypedDict", - "ContentTypedDict", "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", "DeltaMessageTypedDict", "Document", "DocumentTypedDict", "DocumentURLChunk", - "DocumentURLChunkType", "DocumentURLChunkTypedDict", "FileChunk", "FileChunkTypedDict", - "FinishReason", "Format", "Function", "FunctionCall", @@ -207,19 +191,17 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "ImageDetail", "ImageURL", "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", "JSONSchema", "JSONSchemaTypedDict", "Loc", "LocTypedDict", - "Messages", - "MessagesTypedDict", "MistralAzureError", "MistralPromptMode", "NoResponseError", @@ -240,18 +222,14 @@ "Prediction", "PredictionTypedDict", "ReferenceChunk", - "ReferenceChunkType", "ReferenceChunkTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "ResponseValidationError", - "Role", "SDKError", "Security", "SecurityTypedDict", - "Stop", - "StopTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -262,7 +240,6 @@ "TextChunk", "TextChunkTypedDict", "ThinkChunk", - "ThinkChunkType", "ThinkChunkTypedDict", "Thinking", "ThinkingTypedDict", @@ -275,17 +252,14 @@ "ToolMessage", "ToolMessageContent", "ToolMessageContentTypedDict", - "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", - "Type", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", - "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", @@ -295,14 +269,13 @@ "AssistantMessage": ".assistantmessage", "AssistantMessageContent": ".assistantmessage", "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", "ChatCompletionChoice": ".chatcompletionchoice", "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", "ChatCompletionRequest": ".chatcompletionrequest", - "ChatCompletionRequestMessages": ".chatcompletionrequest", - "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", "ChatCompletionRequestStop": ".chatcompletionrequest", "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", "ChatCompletionRequestToolChoice": ".chatcompletionrequest", @@ -311,28 +284,27 @@ "ChatCompletionResponse": ".chatcompletionresponse", "ChatCompletionResponseTypedDict": ".chatcompletionresponse", "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + 
"ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", - "Messages": ".chatcompletionstreamrequest", - "MessagesTypedDict": ".chatcompletionstreamrequest", - "Stop": ".chatcompletionstreamrequest", - "StopTypedDict": ".chatcompletionstreamrequest", "CompletionChunk": ".completionchunk", "CompletionChunkTypedDict": ".completionchunk", "CompletionEvent": ".completionevent", "CompletionEventTypedDict": ".completionevent", "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", - "FinishReason": ".completionresponsestreamchoice", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", "DeltaMessageTypedDict": ".deltamessage", "DocumentURLChunk": ".documenturlchunk", - "DocumentURLChunkType": ".documenturlchunk", "DocumentURLChunkTypedDict": ".documenturlchunk", "FileChunk": ".filechunk", "FileChunkTypedDict": ".filechunk", @@ -346,13 +318,13 @@ "FunctionNameTypedDict": ".functionname", "HTTPValidationError": ".httpvalidationerror", "HTTPValidationErrorData": ".httpvalidationerror", + "ImageDetail": ".imagedetail", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", @@ -378,7 +350,6 @@ "Prediction": ".prediction", "PredictionTypedDict": ".prediction", "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", @@ -387,7 +358,6 @@ "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", - "Role": ".systemmessage", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", @@ -396,9 +366,7 @@ "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", "TextChunkTypedDict": ".textchunk", - "Type": ".textchunk", "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", "ThinkChunkTypedDict": ".thinkchunk", "Thinking": ".thinkchunk", "ThinkingTypedDict": ".thinkchunk", @@ -412,7 +380,6 @@ "ToolMessage": ".toolmessage", "ToolMessageContent": ".toolmessage", "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", "ToolMessageTypedDict": ".toolmessage", "ToolTypes": ".tooltypes", "UsageInfo": ".usageinfo", @@ -420,7 +387,6 @@ "UserMessage": ".usermessage", "UserMessageContent": ".usermessage", "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", 
"UserMessageTypedDict": ".usermessage", "Loc": ".validationerror", "LocTypedDict": ".validationerror", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py similarity index 80% rename from packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py rename to packages/azure/src/mistralai/azure/client/models/assistantmessage.py index 17d740b6..f5793f94 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py @@ -3,16 +3,19 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.azure.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict AssistantMessageContentTypedDict = TypeAliasType( @@ -25,18 +28,22 @@ ) -AssistantMessageRole = Literal["assistant",] - - class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): + ROLE: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @@ -44,11 +51,9 @@ class AssistantMessage(BaseModel): prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: Optional[AssistantMessageRole] = "assistant" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] + optional_fields = ["role", "content", "tool_calls", "prefix"] nullable_fields = ["content", "tool_calls"] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py similarity index 91% rename from packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py rename to packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py index 7c6eb933..67b5ba69 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py @@ -2,7 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_azure.types import BaseModel, UnrecognizedStr +from mistralai.azure.client.types import BaseModel, UnrecognizedStr from typing import Literal, Union from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py rename to packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py index a7b095f3..92179095 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py @@ -11,14 +11,14 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator +from mistralai.azure.client.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionRequestMessagesTypedDict", +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -ChatCompletionRequestMessages = Annotated[ +ChatCompletionRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -72,7 +72,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - messages: List[ChatCompletionRequestMessagesTypedDict] + messages: List[ChatCompletionRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: NotRequired[str] r"""The ID of the model to use for this request.""" @@ -112,7 +112,7 @@ class ChatCompletionRequestTypedDict(TypedDict): class ChatCompletionRequest(BaseModel): - messages: List[ChatCompletionRequestMessages] + messages: List[ChatCompletionRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: Optional[str] = "azureai" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py similarity index 92% rename from packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py rename to packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py index 7a66f322..d41f9c6f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py @@ -3,7 +3,7 @@ from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py similarity index 94% rename from packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py rename to packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py index 96cd631b..be21eed2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py @@ -11,29 +11,33 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator +from mistralai.azure.client.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) r"""Stop 
generation if this token is detected. Or if one of these tokens is detected when providing an array""" -Stop = TypeAliasType("Stop", Union[str, List[str]]) +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -43,7 +47,7 @@ ) -Messages = Annotated[ +ChatCompletionStreamRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -68,7 +72,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - messages: List[MessagesTypedDict] + messages: List[ChatCompletionStreamRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: NotRequired[str] r"""The ID of the model to use for this request.""" @@ -79,7 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" stream: NotRequired[bool] - stop: NotRequired[StopTypedDict] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" @@ -107,7 +111,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): class ChatCompletionStreamRequest(BaseModel): - messages: List[Messages] + messages: List[ChatCompletionStreamRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: Optional[str] = "azureai" @@ -124,7 +128,7 @@ class ChatCompletionStreamRequest(BaseModel): stream: Optional[bool] = True - stop: Optional[Stop] = None + stop: Optional[ChatCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" random_seed: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/azure/src/mistralai/azure/client/models/completionchunk.py similarity index 94% rename from packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py rename to packages/azure/src/mistralai/azure/client/models/completionchunk.py index ca002f52..b94284b2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/completionchunk.py @@ -6,7 +6,7 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import List, Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/azure/src/mistralai/azure/client/models/completionevent.py similarity index 87% rename from packages/mistralai_azure/src/mistralai_azure/models/completionevent.py rename to packages/azure/src/mistralai/azure/client/models/completionevent.py index 5a2039c2..c4b27287 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py +++ b/packages/azure/src/mistralai/azure/client/models/completionevent.py @@ -2,7 +2,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py similarity index 82% rename from packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py rename to packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py index ec9df528..2a4d053f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py @@ -2,13 +2,18 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + UNSET_SENTINEL, + UnrecognizedStr, +) from pydantic import model_serializer from typing import Literal, Union from typing_extensions import TypedDict -FinishReason = Union[ +CompletionResponseStreamChoiceFinishReason = Union[ Literal[ "stop", "length", @@ -22,7 +27,7 @@ class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict - finish_reason: Nullable[FinishReason] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] class CompletionResponseStreamChoice(BaseModel): @@ -30,7 +35,7 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[FinishReason] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/azure/src/mistralai/azure/client/models/contentchunk.py similarity index 93% rename from 
packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py rename to packages/azure/src/mistralai/azure/client/models/contentchunk.py index e6a3e24a..0f09f767 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/contentchunk.py @@ -4,7 +4,7 @@ from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_azure.utils import get_discriminator +from mistralai.azure.client.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union from typing_extensions import Annotated, TypeAliasType diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/azure/src/mistralai/azure/client/models/deltamessage.py similarity index 81% rename from packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py rename to packages/azure/src/mistralai/azure/client/models/deltamessage.py index 1801ac76..2c01feae 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/azure/src/mistralai/azure/client/models/deltamessage.py @@ -3,7 +3,7 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, @@ -15,24 +15,26 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] ) -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) class DeltaMessageTypedDict(TypedDict): role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: OptionalNullable[str] = UNSET - content: OptionalNullable[Content] = UNSET + content: OptionalNullable[DeltaMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET diff --git a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py similarity index 72% rename from packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py rename to packages/azure/src/mistralai/azure/client/models/documenturlchunk.py index ea8d5625..345bafc2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py @@ -1,39 +1,45 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.azure.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentURLChunkType = Literal["document_url",] +from typing_extensions import Annotated, NotRequired, TypedDict class DocumentURLChunkTypedDict(TypedDict): document_url: str + type: Literal["document_url"] document_name: NotRequired[Nullable[str]] r"""The filename of the document""" - type: NotRequired[DocumentURLChunkType] class DocumentURLChunk(BaseModel): document_url: str + TYPE: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + document_name: OptionalNullable[str] = UNSET r"""The filename of the document""" - type: Optional[DocumentURLChunkType] = "document_url" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["document_name", "type"] + optional_fields = ["type", "document_name"] nullable_fields = ["document_name"] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/filechunk.py b/packages/azure/src/mistralai/azure/client/models/filechunk.py similarity index 83% rename from packages/mistralai_azure/src/mistralai_azure/models/filechunk.py rename to packages/azure/src/mistralai/azure/client/models/filechunk.py index 2c3edc07..829f03d8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/filechunk.py +++ b/packages/azure/src/mistralai/azure/client/models/filechunk.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const import pydantic from pydantic.functional_validators import AfterValidator from typing import Literal, Optional diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/azure/src/mistralai/azure/client/models/function.py similarity index 90% rename from packages/mistralai_gcp/src/mistralai_gcp/models/function.py rename to packages/azure/src/mistralai/azure/client/models/function.py index 7ad1ae64..f4edce0f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/azure/src/mistralai/azure/client/models/function.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/azure/src/mistralai/azure/client/models/functioncall.py similarity index 91% rename from packages/mistralai_azure/src/mistralai_azure/models/functioncall.py rename to packages/azure/src/mistralai/azure/client/models/functioncall.py index dd93c462..d476792c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ b/packages/azure/src/mistralai/azure/client/models/functioncall.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Any, Dict, Union from typing_extensions import TypeAliasType, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py b/packages/azure/src/mistralai/azure/client/models/functionname.py similarity index 89% rename from packages/mistralai_azure/src/mistralai_azure/models/functionname.py rename to packages/azure/src/mistralai/azure/client/models/functionname.py index b55c82af..839e0d55 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py +++ b/packages/azure/src/mistralai/azure/client/models/functionname.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py similarity index 87% rename from packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py rename to packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py index 56607d94..40bccddc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py @@ -4,8 +4,8 @@ from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai_azure.models import MistralAzureError -from mistralai_azure.types import BaseModel +from mistralai.azure.client.models import MistralAzureError +from mistralai.azure.client.types import BaseModel from typing import List, Optional diff --git a/packages/azure/src/mistralai/azure/client/models/imagedetail.py b/packages/azure/src/mistralai/azure/client/models/imagedetail.py new file mode 100644 index 00000000..2d074cee --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/imagedetail.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py b/packages/azure/src/mistralai/azure/client/models/imageurl.py similarity index 87% rename from packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py rename to packages/azure/src/mistralai/azure/client/models/imageurl.py index 20d4ba77..b3c705e3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py +++ b/packages/azure/src/mistralai/azure/client/models/imageurl.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from .imagedetail import ImageDetail +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, @@ -14,13 +15,13 @@ class ImageURLTypedDict(TypedDict): url: str - detail: NotRequired[Nullable[str]] + detail: NotRequired[Nullable[ImageDetail]] class ImageURL(BaseModel): url: str - detail: OptionalNullable[str] = UNSET + detail: OptionalNullable[ImageDetail] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py new file mode 100644 index 00000000..ee6de50f --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + TYPE: Annotated[ + Annotated[ + Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) + ], + pydantic.Field(alias="type"), + ] = "image_url" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/azure/src/mistralai/azure/client/models/jsonschema.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py rename to packages/azure/src/mistralai/azure/client/models/jsonschema.py index 0f7563fc..5aaa490a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py +++ b/packages/azure/src/mistralai/azure/client/models/jsonschema.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py b/packages/azure/src/mistralai/azure/client/models/mistralazureerror.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py rename to packages/azure/src/mistralai/azure/client/models/mistralazureerror.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py b/packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py similarity index 89% rename from packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py rename to packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py index 77230b7e..26e7adbd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py +++ b/packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import UnrecognizedStr +from mistralai.azure.client.types import UnrecognizedStr from typing import Literal, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py b/packages/azure/src/mistralai/azure/client/models/no_response_error.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py rename to packages/azure/src/mistralai/azure/client/models/no_response_error.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py rename to packages/azure/src/mistralai/azure/client/models/ocrimageobject.py index 9d0dd01d..38e9d3e4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py b/packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py similarity index 91% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py rename to packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py index efb62a58..12858da9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py rename to packages/azure/src/mistralai/azure/client/models/ocrpageobject.py index e9571800..5fb821c1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py @@ -4,7 +4,7 @@ from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py similarity index 99% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py rename to packages/azure/src/mistralai/azure/client/models/ocrrequest.py index e9c23afc..fece2713 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py @@ -5,7 +5,7 @@ from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py rename to packages/azure/src/mistralai/azure/client/models/ocrresponse.py index 3e43fa8e..787289fa 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py @@ -3,7 +3,7 @@ from __future__ import annotations from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py similarity index 78% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py rename to packages/azure/src/mistralai/azure/client/models/ocrtableobject.py index 189f059e..3e3c2583 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py @@ -1,15 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel, UnrecognizedStr import pydantic -from typing import Literal +from typing import Literal, Union from typing_extensions import Annotated, TypedDict -Format = Literal[ - "markdown", - "html", +Format = Union[ + Literal[ + "markdown", + "html", + ], + UnrecognizedStr, ] r"""Format of the table""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py rename to packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py index 1f5c9f1b..e2ceba35 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/azure/src/mistralai/azure/client/models/prediction.py similarity index 89% rename from packages/mistralai_azure/src/mistralai_azure/models/prediction.py rename to packages/azure/src/mistralai/azure/client/models/prediction.py index b23a935c..6b8d6480 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py +++ b/packages/azure/src/mistralai/azure/client/models/prediction.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const import pydantic from pydantic.functional_validators import AfterValidator from typing import Literal, Optional diff --git a/packages/azure/src/mistralai/azure/client/models/referencechunk.py b/packages/azure/src/mistralai/azure/client/models/referencechunk.py new file mode 100644 index 00000000..e0bcb06b --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/referencechunk.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + TYPE: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/azure/src/mistralai/azure/client/models/responseformat.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/models/responseformat.py rename to packages/azure/src/mistralai/azure/client/models/responseformat.py index c989f3a4..39fb03a2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/azure/src/mistralai/azure/client/models/responseformat.py @@ -3,7 +3,7 @@ from __future__ import annotations from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/azure/src/mistralai/azure/client/models/responseformats.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/models/responseformats.py rename to packages/azure/src/mistralai/azure/client/models/responseformats.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py b/packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py similarity index 92% rename from packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py rename to packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py index a33954cc..cbdffcbb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py +++ b/packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai_azure.models import MistralAzureError +from mistralai.azure.client.models import MistralAzureError @dataclass(unsafe_hash=True) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py b/packages/azure/src/mistralai/azure/client/models/sdkerror.py similarity index 95% rename from packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py rename to packages/azure/src/mistralai/azure/client/models/sdkerror.py index 216d7f8f..a1e9aaca 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py +++ b/packages/azure/src/mistralai/azure/client/models/sdkerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai_azure.models import MistralAzureError +from mistralai.azure.client.models import MistralAzureError MAX_MESSAGE_LEN = 10_000 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/azure/src/mistralai/azure/client/models/security.py similarity index 80% rename from packages/mistralai_gcp/src/mistralai_gcp/models/security.py rename to 
packages/azure/src/mistralai/azure/client/models/security.py index 38574942..9b83ba98 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py +++ b/packages/azure/src/mistralai/azure/client/models/security.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import FieldMetadata, SecurityMetadata +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import FieldMetadata, SecurityMetadata from typing_extensions import Annotated, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/azure/src/mistralai/azure/client/models/systemmessage.py similarity index 57% rename from packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py rename to packages/azure/src/mistralai/azure/client/models/systemmessage.py index d74bdf32..38c280c8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/systemmessage.py @@ -5,9 +5,12 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) -from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( @@ -21,15 +24,15 @@ ) -Role = Literal["system",] - - class SystemMessageTypedDict(TypedDict): content: SystemMessageContentTypedDict - role: NotRequired[Role] + role: Literal["system"] class SystemMessage(BaseModel): content: SystemMessageContent - role: Optional[Role] = "system" + ROLE: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py similarity index 66% rename from packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py rename to packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py index e0b5bbc3..225f38b7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py +++ b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py @@ -3,8 +3,7 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai_gcp.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType @@ -16,6 +15,5 @@ SystemMessageContentChunks = Annotated[ - Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[TextChunk, ThinkChunk], Field(discriminator="TYPE") ] diff --git a/packages/azure/src/mistralai/azure/client/models/textchunk.py b/packages/azure/src/mistralai/azure/client/models/textchunk.py new file mode 100644 index 
00000000..e513c143 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/textchunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + TYPE: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py similarity index 65% rename from packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py rename to packages/azure/src/mistralai/azure/client/models/thinkchunk.py index b88c0cb5..e769399f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py @@ -3,9 +3,12 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ThinkingTypedDict = TypeAliasType( @@ -16,20 +19,20 @@ Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) -ThinkChunkType = Literal["thinking",] - - class ThinkChunkTypedDict(TypedDict): thinking: List[ThinkingTypedDict] + type: Literal["thinking"] closed: NotRequired[bool] r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] class ThinkChunk(BaseModel): thinking: List[Thinking] + TYPE: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" - - type: Optional[ThinkChunkType] = "thinking" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/azure/src/mistralai/azure/client/models/tool.py similarity index 89% rename from packages/mistralai_azure/src/mistralai_azure/models/tool.py rename to packages/azure/src/mistralai/azure/client/models/tool.py index c91deec2..169305bc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ b/packages/azure/src/mistralai/azure/client/models/tool.py @@ -3,7 +3,7 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/azure/src/mistralai/azure/client/models/toolcall.py similarity index 92% rename from packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py rename to packages/azure/src/mistralai/azure/client/models/toolcall.py index 23ef157a..a589b1b3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/azure/src/mistralai/azure/client/models/toolcall.py @@ -3,7 +3,7 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py b/packages/azure/src/mistralai/azure/client/models/toolchoice.py similarity index 93% rename from packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py rename to packages/azure/src/mistralai/azure/client/models/toolchoice.py index 93b4b7fe..1f623222 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/toolchoice.py @@ -3,7 +3,7 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py b/packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py rename to packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/azure/src/mistralai/azure/client/models/toolmessage.py similarity index 77% rename from packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py rename to packages/azure/src/mistralai/azure/client/models/toolmessage.py index 4bc5c9a9..a73fd6bf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/toolmessage.py @@ -2,16 +2,19 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from 
mistralai.azure.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolMessageContentTypedDict = TypeAliasType( @@ -22,28 +25,28 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool",] - - class ToolMessageTypedDict(TypedDict): content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] + ROLE: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + tool_call_id: OptionalNullable[str] = UNSET name: OptionalNullable[str] = UNSET - role: Optional[ToolMessageRole] = "tool" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] + optional_fields = ["tool_call_id", "name"] nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py b/packages/azure/src/mistralai/azure/client/models/tooltypes.py similarity index 77% rename from packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py rename to packages/azure/src/mistralai/azure/client/models/tooltypes.py index 8b812ae0..1cce7446 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py +++ b/packages/azure/src/mistralai/azure/client/models/tooltypes.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import UnrecognizedStr +from mistralai.azure.client.types import UnrecognizedStr from typing import Literal, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/azure/src/mistralai/azure/client/models/usageinfo.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py rename to packages/azure/src/mistralai/azure/client/models/usageinfo.py index bbe5cdfa..19a6b09f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ b/packages/azure/src/mistralai/azure/client/models/usageinfo.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/azure/src/mistralai/azure/client/models/usermessage.py similarity index 73% rename from packages/mistralai_azure/src/mistralai_azure/models/usermessage.py rename to packages/azure/src/mistralai/azure/client/models/usermessage.py index 85fedb4b..96439c64 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/azure/src/mistralai/azure/client/models/usermessage.py @@ -2,10 +2,13 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.azure.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict UserMessageContentTypedDict = TypeAliasType( @@ -16,22 +19,22 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user",] - - class UserMessageTypedDict(TypedDict): content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] + role: Literal["user"] class UserMessage(BaseModel): content: Nullable[UserMessageContent] - role: Optional[UserMessageRole] = "user" + ROLE: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role"] + optional_fields = [] nullable_fields = ["content"] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/azure/src/mistralai/azure/client/models/validationerror.py similarity index 90% rename from packages/mistralai_azure/src/mistralai_azure/models/validationerror.py rename to packages/azure/src/mistralai/azure/client/models/validationerror.py index 4caff4a6..817ecf7a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ b/packages/azure/src/mistralai/azure/client/models/validationerror.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import List, Union from typing_extensions import TypeAliasType, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/ocr.py b/packages/azure/src/mistralai/azure/client/ocr.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/ocr.py rename to packages/azure/src/mistralai/azure/client/ocr.py index 31e27f6e..098e764b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/ocr.py +++ b/packages/azure/src/mistralai/azure/client/ocr.py @@ -1,10 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai_azure import models, utils -from mistralai_azure._hooks import HookContext -from mistralai_azure.types import Nullable, OptionalNullable, UNSET -from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response +from mistralai.azure.client import models, utils +from mistralai.azure.client._hooks import HookContext +from mistralai.azure.client.types import Nullable, OptionalNullable, UNSET +from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/py.typed b/packages/azure/src/mistralai/azure/client/py.typed similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/py.typed rename to packages/azure/src/mistralai/azure/client/py.typed diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/azure/src/mistralai/azure/client/sdk.py similarity index 59% rename from packages/mistralai_azure/src/mistralai_azure/sdk.py rename to packages/azure/src/mistralai/azure/client/sdk.py index 04bc7743..985cb9a8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ b/packages/azure/src/mistralai/azure/client/sdk.py @@ -7,61 +7,74 @@ from .utils.retries import RetryConfig import httpx import importlib -from mistralai_azure import models, utils -from mistralai_azure._hooks import SDKHooks -from mistralai_azure.types import OptionalNullable, UNSET -from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import logging +from mistralai.azure.client import models, utils +from mistralai.azure.client._hooks import SDKHooks +from mistralai.azure.client.types import OptionalNullable, UNSET +import sys +from typing import Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import warnings import weakref +logger = logging.getLogger(__name__) + if TYPE_CHECKING: - from mistralai_azure.chat import Chat - from mistralai_azure.ocr import Ocr + from mistralai.azure.client.chat import Chat + from mistralai.azure.client.ocr import Ocr class MistralAzure(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + r"""Mistral AI API: Dora OpenAPI schema + + Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. 
+ """ chat: "Chat" r"""Chat Completion API.""" ocr: "Ocr" _sub_sdk_map = { - "chat": ("mistralai_azure.chat", "Chat"), - "ocr": ("mistralai_azure.ocr", "Ocr"), + "chat": ("mistralai.azure.client.chat", "Chat"), + "ocr": ("mistralai.azure.client.ocr", "Ocr"), } def __init__( self, - azure_api_key: Union[str, Callable[[], str]], - azure_endpoint: str, + api_key: Union[str, Callable[[], str]], + server: Optional[str] = None, + server_url: Optional[str] = None, url_params: Optional[Dict[str, str]] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, retry_config: OptionalNullable[RetryConfig] = UNSET, timeout_ms: Optional[int] = None, debug_logger: Optional[Logger] = None, + api_version: str = "2024-05-01-preview", ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. - :param azure_api_key: The azure_api_key required for authentication - :param azure_endpoint: The Azure AI endpoint URL to use for all methods + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods :param url_params: Parameters to optionally template the server URL with :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods :param timeout_ms: Optional request timeout applied to each operation in milliseconds + :param api_version: Azure API version to use (injected as query param) """ - - # if azure_endpoint doesn't end with `/v1` add it - if not azure_endpoint.endswith("/"): - azure_endpoint += "/" - if not azure_endpoint.endswith("v1/"): - azure_endpoint += "v1/" - server_url = azure_endpoint - client_supplied = True if client is None: - client = httpx.Client() + client = httpx.Client( + follow_redirects=True, + params={"api-version": api_version}, + ) client_supplied = False + elif api_version != "2024-05-01-preview": + warnings.warn( + "api_version is ignored when a custom client is provided. " + "Set the api-version query parameter on your httpx.Client directly.", + stacklevel=2, + ) assert issubclass( type(client), HttpClient @@ -69,8 +82,17 @@ def __init__( async_client_supplied = True if async_client is None: - async_client = httpx.AsyncClient() + async_client = httpx.AsyncClient( + follow_redirects=True, + params={"api-version": api_version}, + ) async_client_supplied = False + elif api_version != "2024-05-01-preview": + warnings.warn( + "api_version is ignored when a custom async_client is provided. " + "Set the api-version query parameter on your httpx.AsyncClient directly.", + stacklevel=2, + ) if debug_logger is None: debug_logger = get_default_logger() @@ -79,11 +101,15 @@ def __init__( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." 
- security: Any = None - if callable(azure_api_key): - security = lambda: models.Security(api_key=azure_api_key()) # pylint: disable=unnecessary-lambda-assignment + security: Union[models.Security, Callable[[], models.Security]] + if callable(api_key): + + def get_security() -> models.Security: + return models.Security(api_key=api_key()) + + security = get_security else: - security = models.Security(api_key=azure_api_key) + security = models.Security(api_key=api_key) if server_url is not None: if url_params is not None: @@ -98,16 +124,15 @@ def __init__( async_client_supplied=async_client_supplied, security=security, server_url=server_url, - server=None, + server=server, retry_config=retry_config, timeout_ms=timeout_ms, debug_logger=debug_logger, ), + parent_ref=self, ) hooks = SDKHooks() - - # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks current_server_url, *_ = self.sdk_configuration.get_server_details() @@ -127,13 +152,28 @@ def __init__( self.sdk_configuration.async_client_supplied, ) + def dynamic_import(self, modname, retries=3): + last_exc: Optional[Exception] = None + for attempt in range(retries): + try: + return importlib.import_module(modname) + except (KeyError, ImportError, ModuleNotFoundError) as e: + last_exc = e + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise ImportError( + f"Failed to import module '{modname}' after {retries} attempts" + ) from last_exc + def __getattr__(self, name: str): if name in self._sub_sdk_map: module_path, class_name = self._sub_sdk_map[name] try: - module = importlib.import_module(module_path) + module = self.dynamic_import(module_path) klass = getattr(module, class_name) - instance = klass(self.sdk_configuration) + instance = klass(self.sdk_configuration, parent_ref=self) setattr(self, name, instance) return instance except ImportError as e: @@ -160,7 +200,7 @@ def __enter__(self): async def __aenter__(self): return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, _exc_type, _exc_val, _exc_tb): if ( self.sdk_configuration.client is not None and not self.sdk_configuration.client_supplied @@ -168,7 +208,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.sdk_configuration.client.close() self.sdk_configuration.client = None - async def __aexit__(self, exc_type, exc_val, exc_tb): + async def __aexit__(self, _exc_type, _exc_val, _exc_tb): if ( self.sdk_configuration.async_client is not None and not self.sdk_configuration.async_client_supplied diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/azure/src/mistralai/azure/client/sdkconfiguration.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py rename to packages/azure/src/mistralai/azure/client/sdkconfiguration.py index cf85c47e..919225f9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/azure/src/mistralai/azure/client/sdkconfiguration.py @@ -9,8 +9,8 @@ from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass -from mistralai_gcp import models -from mistralai_gcp.types import OptionalNullable, UNSET +from mistralai.azure.client import models +from mistralai.azure.client.types import OptionalNullable, UNSET from pydantic import Field from typing import Callable, Dict, Optional, Tuple, Union diff --git 
a/packages/mistralai_azure/src/mistralai_azure/types/__init__.py b/packages/azure/src/mistralai/azure/client/types/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/types/__init__.py rename to packages/azure/src/mistralai/azure/client/types/__init__.py diff --git a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py b/packages/azure/src/mistralai/azure/client/types/basemodel.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/types/basemodel.py rename to packages/azure/src/mistralai/azure/client/types/basemodel.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/azure/src/mistralai/azure/client/utils/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/__init__.py rename to packages/azure/src/mistralai/azure/client/utils/__init__.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/azure/src/mistralai/azure/client/utils/annotations.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/annotations.py rename to packages/azure/src/mistralai/azure/client/utils/annotations.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py b/packages/azure/src/mistralai/azure/client/utils/datetimes.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py rename to packages/azure/src/mistralai/azure/client/utils/datetimes.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py b/packages/azure/src/mistralai/azure/client/utils/enums.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/enums.py rename to packages/azure/src/mistralai/azure/client/utils/enums.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py b/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py rename to packages/azure/src/mistralai/azure/client/utils/eventstreaming.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/azure/src/mistralai/azure/client/utils/forms.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/forms.py rename to packages/azure/src/mistralai/azure/client/utils/forms.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/headers.py b/packages/azure/src/mistralai/azure/client/utils/headers.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/headers.py rename to packages/azure/src/mistralai/azure/client/utils/headers.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py b/packages/azure/src/mistralai/azure/client/utils/logger.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/logger.py rename to packages/azure/src/mistralai/azure/client/utils/logger.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py b/packages/azure/src/mistralai/azure/client/utils/metadata.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/metadata.py rename to packages/azure/src/mistralai/azure/client/utils/metadata.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py b/packages/azure/src/mistralai/azure/client/utils/queryparams.py similarity index 100% rename from 
packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py rename to packages/azure/src/mistralai/azure/client/utils/queryparams.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py b/packages/azure/src/mistralai/azure/client/utils/requestbodies.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py rename to packages/azure/src/mistralai/azure/client/utils/requestbodies.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py b/packages/azure/src/mistralai/azure/client/utils/retries.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/retries.py rename to packages/azure/src/mistralai/azure/client/utils/retries.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/security.py b/packages/azure/src/mistralai/azure/client/utils/security.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/security.py rename to packages/azure/src/mistralai/azure/client/utils/security.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/azure/src/mistralai/azure/client/utils/serializers.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/serializers.py rename to packages/azure/src/mistralai/azure/client/utils/serializers.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py similarity index 95% rename from packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py rename to packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py index f5813119..5317ac87 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py +++ b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py @@ -5,7 +5,7 @@ import httpx from .serializers import unmarshal_json -from mistralai_azure import models +from mistralai.azure.client import models T = TypeVar("T") diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/url.py b/packages/azure/src/mistralai/azure/client/utils/url.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/url.py rename to packages/azure/src/mistralai/azure/client/utils/url.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/azure/src/mistralai/azure/client/utils/values.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/values.py rename to packages/azure/src/mistralai/azure/client/utils/values.py diff --git a/packages/mistralai_azure/uv.lock b/packages/azure/uv.lock similarity index 99% rename from packages/mistralai_azure/uv.lock rename to packages/azure/uv.lock index a227d093..cedb1ce8 100644 --- a/packages/mistralai_azure/uv.lock +++ b/packages/azure/uv.lock @@ -154,7 +154,7 @@ wheels = [ [[package]] name = "mistralai-azure" -version = "1.8.0" +version = "2.0.0a4" source = { editable = "." 
} dependencies = [ { name = "httpcore" }, diff --git a/packages/gcp/.genignore b/packages/gcp/.genignore new file mode 100644 index 00000000..9a119b75 --- /dev/null +++ b/packages/gcp/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +src/mistralai/gcp/client/sdk.py +src/mistralai/gcp/client/_hooks/registration.py +README.md +USAGE.md +docs/sdks/**/README.md diff --git a/packages/mistralai_gcp/.gitattributes b/packages/gcp/.gitattributes similarity index 100% rename from packages/mistralai_gcp/.gitattributes rename to packages/gcp/.gitattributes diff --git a/packages/mistralai_gcp/.gitignore b/packages/gcp/.gitignore similarity index 100% rename from packages/mistralai_gcp/.gitignore rename to packages/gcp/.gitignore diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock similarity index 55% rename from packages/mistralai_gcp/.speakeasy/gen.lock rename to packages/gcp/.speakeasy/gen.lock index 31eb1bc7..8ce6c5ea 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/gcp/.speakeasy/gen.lock @@ -1,24 +1,25 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: d91fd326da9118e6c9dddea48eaf47a7 + docChecksum: bc4a0ba9c38418d84a6a8a76b503977b docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 1.8.0 - configChecksum: 42a1e5752a774fcdb0a5949bd6535933 + releaseVersion: 2.0.0a4 + configChecksum: 95fb33ae488fa72fb4ba17c6b93551a9 repoURL: https://github.com/mistralai/client-python.git - repoSubDirectory: packages/mistralai_gcp - installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_gcp + repoSubDirectory: packages/gcp + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp published: true persistentEdits: - generation_id: e1cf1217-2a08-4cb8-b92c-542b4f885caa - pristine_commit_hash: 57fe0df69b76fe4754f039d49f7c40770fb3097d - pristine_tree_hash: c4c1037865fb86650ada485b300f96784045922f + generation_id: 5f09b925-b801-4bf0-bda9-6f9a3212c588 + pristine_commit_hash: 20c7ce96f6a097f98d3367b89a7bea09ba0ded7c + pristine_tree_hash: c30d519719cc0cd17d7bf53ae2c13b1d8b125c5e features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 constsAndDefaults: 1.0.5 core: 5.23.18 defaultEnabledRetries: 0.2.0 @@ -57,16 +58,12 @@ trackedFiles: pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 docs/models/assistantmessage.md: id: 7e0218023943 - last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 - pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d docs/models/assistantmessagecontent.md: id: 9f1795bbe642 last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d - docs/models/assistantmessagerole.md: - id: bb5d2a4bc72f - last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 - pristine_git_object: 658229e77eb6419391cf7941568164541c528387 docs/models/chatcompletionchoice.md: id: 0d15c59ab501 last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 @@ -77,12 +74,12 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - 
last_write_checksum: sha1:1ac7d6b5a8aba9c922cf5fe45f94aee55228f9db - pristine_git_object: 6886f9dcd43e8d61f4ec6692235f281cb03a5c86 - docs/models/chatcompletionrequestmessages.md: - id: ec996b350e12 - last_write_checksum: sha1:2ecec8d12cdb48426f4eb62732066fc79fcd4ec3 - pristine_git_object: bc7708a67f06d74e8a5bf1facb2b23fb1e08053c + last_write_checksum: sha1:2bf5152388f18436be4fe1c541b8d423dcae175c + pristine_git_object: 61a25d86e7dc292621f7f6c0f8909137a16b9112 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 docs/models/chatcompletionrequeststop.md: id: fcaf5bbea451 last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 @@ -97,8 +94,16 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: sha1:33778fdf71aa9b934ae48d51664daaa0dd817e04 - pristine_git_object: ff1940dd8a92d7892d895c3fc0e0a4b321e55534 + last_write_checksum: sha1:f30b2a7353e7406eb30af841a1a211ea5cb30cb0 + pristine_git_object: 3e790e7dc7143b0ae287ad2df14ae7e7a4085e3f + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 docs/models/chatcompletionstreamrequesttoolchoice.md: id: 210d5e5b1413 last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 @@ -113,20 +118,24 @@ trackedFiles: pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d docs/models/completionresponsestreamchoice.md: id: d56824d615a6 - last_write_checksum: sha1:dcf4b125b533192cb5aea1a68551866954712dc5 - pristine_git_object: c807dacd98eb3561ee45f40db71a92cb72b0f6de - docs/models/content.md: - id: bfd859c99f86 - last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 - pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 docs/models/contentchunk.md: id: d2d3a32080cd last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f docs/models/deltamessage.md: id: 6c5ed6b60968 - last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 - pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/fimcompletionrequest.md: id: b44677ecc293 last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 @@ -147,10 +156,6 @@ trackedFiles: id: c97a11b764e9 last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d 
- docs/models/finishreason.md: - id: 73315c2a39b3 - last_write_checksum: sha1:5b58c7fa9219f728b9731287e21abe1be9f11e4a - pristine_git_object: 45a5aedb7241cf080df3eb976a4413064d314009 docs/models/function.md: id: 416a80fba031 last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 @@ -167,22 +172,22 @@ trackedFiles: id: a211c095f2ac last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 docs/models/imageurl.md: id: e75dd23cec1d - last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 - pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 docs/models/imageurlchunk.md: id: 4407097bfff3 - last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 - pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 - docs/models/imageurlchunkimageurl.md: - id: c7fae88454ce - last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 - pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 - docs/models/imageurlchunktype.md: - id: b9af2db9ff60 - last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 - pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + last_write_checksum: sha1:30b72826963e22cadf76ac0b7604288dbc4fb943 + pristine_git_object: a84dac32b99390e3fd0559714ca43795742192c6 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 docs/models/jsonschema.md: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f @@ -191,10 +196,6 @@ trackedFiles: id: b071d5a509cc last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 - docs/models/messages.md: - id: 2103cd675c2f - last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 - pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/mistralpromptmode.md: id: d17d5db4d3b6 last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 @@ -205,12 +206,8 @@ trackedFiles: pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 docs/models/referencechunk.md: id: 07895f9debfd - last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 - pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc - docs/models/referencechunktype.md: - id: 0944b80ea9c8 - last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 - pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab docs/models/responseformat.md: id: 50a1e4140614 last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add @@ -219,22 +216,14 @@ trackedFiles: id: cf1f250b82db last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f - docs/models/role.md: - id: b694540a5b1e - last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d - pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 docs/models/security.md: id: 452e4d4eb67a 
last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 - docs/models/stop.md: - id: f231cc9f5041 - last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 - pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 docs/models/systemmessage.md: id: fdb7963e1cdf - last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f - pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 docs/models/systemmessagecontent.md: id: 94a56febaeda last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb @@ -245,16 +234,12 @@ trackedFiles: pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 docs/models/textchunk.md: id: 6cd12e0ef110 - last_write_checksum: sha1:6d41d1991d122805734ed0d90ee01592aa5ae6ff - pristine_git_object: 6daab3c381bd8c13d2935bf62578648a8470fc76 + last_write_checksum: sha1:aa448d4937c0c1cd562621f0a9080aa0dc6e4bd1 + pristine_git_object: b266619dcb57222ec343f373c43b2b5cef5b8b93 docs/models/thinkchunk.md: id: bca24d7153f6 - last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b - pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab - docs/models/thinkchunktype.md: - id: 0fbeed985341 - last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 - pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + last_write_checksum: sha1:2b8ff7737fa7255673ca31da7cb2e6803fce9e02 + pristine_git_object: b07f598ebc5f0e9c041186c081dc98bc21104bdb docs/models/thinking.md: id: 07234f8dd364 last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 @@ -277,40 +262,28 @@ trackedFiles: pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 docs/models/toolmessage.md: id: 0553747c37a1 - last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 - pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 docs/models/toolmessagecontent.md: id: f0522d2d3c93 last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 - docs/models/toolmessagerole.md: - id: f333d4d1ab56 - last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f - pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 docs/models/tooltypes.md: id: adb50fe63ea2 last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 - docs/models/type.md: - id: 98c32f09b2c8 - last_write_checksum: sha1:8aa9ca999e9648ddc2240bf80780684e3e858ddf - pristine_git_object: eb0581e7174b6951d69c485a64af5244cb8687fa docs/models/usageinfo.md: id: ec6fe65028a9 last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 docs/models/usermessage.md: id: ed66d7a0f80b - last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed - pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 docs/models/usermessagecontent.md: id: 52c072c851e8 last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf - 
docs/models/usermessagerole.md: - id: 99ffa937c462 - last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 - pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 docs/models/utils/retryconfig.md: id: 4343ac43161c last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d @@ -329,320 +302,324 @@ trackedFiles: pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 scripts/prepare_readme.py: id: e0c5957a6035 - last_write_checksum: sha1:81c7dbabc0e726a4a150e6ef1bcba578d3f1153d - pristine_git_object: 6c4b993238c1a60d4df4bb7de0a0b0a82e385dbf + last_write_checksum: sha1:eb988bc0e00ed4bb14e9a3572845af14f06c9b42 + pristine_git_object: ae27b555c05c3c9f35d84e8bbe6a7c9f80cf94b2 scripts/publish.sh: id: fe273b08f514 last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 - src/mistralai_gcp/__init__.py: - id: b6565f49e73b + src/mistralai/gcp/client/__init__.py: + id: 4f63decd432e last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c - src/mistralai_gcp/_hooks/__init__.py: - id: 663f3129700b + src/mistralai/gcp/client/_hooks/__init__.py: + id: adcb191838d1 last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 - src/mistralai_gcp/_hooks/sdkhooks.py: - id: 46ab7e644296 - last_write_checksum: sha1:a2c018871bea47706a76b03d9a17dab717c339c0 - pristine_git_object: b81c2a2739d316cfed54daec48df6375155eb802 - src/mistralai_gcp/_hooks/types.py: - id: 1f77198243ec - last_write_checksum: sha1:fbf5f1fb258b75133c6b12ae688c03c18b2debd5 - pristine_git_object: f8088f4c26d3ae27202c716c80c391d4daed4156 - src/mistralai_gcp/_version.py: - id: 4e2b8c406f49 - last_write_checksum: sha1:18c7db41065d76e733dc84c997f2a7808892a7c7 - pristine_git_object: a170f0ab6d229979b9077477809b10f2774a0144 - src/mistralai_gcp/basesdk.py: - id: b84fa6111b69 - last_write_checksum: sha1:41381dd799bd5e1f8a15bb65a0672dc6cc4796c4 - pristine_git_object: 7a93de23ad83096b2392e05b11f29030b5961456 - src/mistralai_gcp/chat.py: - id: 1cc7d54332ba - last_write_checksum: sha1:b4b4db3cfeac555718e2d74e897c6ba22b07a672 - pristine_git_object: 3dd6040fc7a565ffc4854bcc25e1e24a6683896d - src/mistralai_gcp/fim.py: - id: 1e5bec08157c - last_write_checksum: sha1:2c27170c5932893d4e8bec8ce45b2dc6e0957cd6 - pristine_git_object: 36d9fd60baaad606d9d57a30afdd9566b06b4caa - src/mistralai_gcp/httpclient.py: - id: 7de4ac861042 + src/mistralai/gcp/client/_hooks/sdkhooks.py: + id: 7e23394c3f65 + last_write_checksum: sha1:4a03a16da35168f25ed0cccfdb0d4c4d86bbe242 + pristine_git_object: 2af4deeda8055f4c57c0c7f00a7b79033435cf34 + src/mistralai/gcp/client/_hooks/types.py: + id: 4f37fd18bfd9 + last_write_checksum: sha1:2b295cc28d5fa2c79495510c8b97a1ea60f993e0 + pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 + src/mistralai/gcp/client/_version.py: + id: f87319e32c7b + last_write_checksum: sha1:8c07e6351bf2df8239b3c02db75ee469dba53394 + pristine_git_object: ba48dac120cadd3f586b38659dc04e50838daa11 + src/mistralai/gcp/client/basesdk.py: + id: 4d594572857b + last_write_checksum: sha1:45ed4b6078e01d52d1dcf4bdc5494b700f1a6cde + pristine_git_object: 6f9f5fd9a2cadc8893d6693c1d40a8114c0fdc2a + src/mistralai/gcp/client/chat.py: + id: 4c41f05f786e + last_write_checksum: sha1:a4d5609f51dee25dfc34f83e1eda2888aa01dda6 + pristine_git_object: 78541248204cbd5b92b6d6d362924fcdada8a948 + src/mistralai/gcp/client/fim.py: + id: 
13d2d208e0ef + last_write_checksum: sha1:e6226c1720effd314afa7b9a21e5ec2347e5a74f + pristine_git_object: e2acacd58c28fa7ea718240b01a3714f7fc0b8f6 + src/mistralai/gcp/client/httpclient.py: + id: a53dd7be6a4c last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d - src/mistralai_gcp/models/__init__.py: - id: 9a7b2a1f0dba - last_write_checksum: sha1:54654df1aecc8d4f634ebd4dbcb0fed16da80309 - pristine_git_object: fe85b133a3a7652cfcfd3b44074be3729c8a9b7b - src/mistralai_gcp/models/assistantmessage.py: - id: 0779dd85c548 - last_write_checksum: sha1:ccf5d6a93bf007d47f0415320afb047278e10172 - pristine_git_object: 17d740b6eeb433b2865a652d1dd760227ad38191 - src/mistralai_gcp/models/chatcompletionchoice.py: - id: b5843c853153 - last_write_checksum: sha1:d389ddcfb64980b6c56a42d53bce7c63e26cc569 - pristine_git_object: fe3ee952a207f772ec49972cbd30f83654c84ad9 - src/mistralai_gcp/models/chatcompletionrequest.py: - id: 42d6cdf4646f - last_write_checksum: sha1:9685d594f13e8500e9c7fbab1e0d4042fccfc23d - pristine_git_object: 80345f9d956f64396f48850641842b2a3a6f8bee - src/mistralai_gcp/models/chatcompletionresponse.py: - id: 14720f23411e - last_write_checksum: sha1:46f14c3e00d21e9f01756f111d353768ad939494 - pristine_git_object: a7953eb156cc8185d70f92df8a75a2ebb77840b9 - src/mistralai_gcp/models/chatcompletionstreamrequest.py: - id: 2e17680adc7e - last_write_checksum: sha1:37c2daaad5c557234b5f067152280440f4c96933 - pristine_git_object: e857d51522dc9964cde865d7f681bd856a3cbdaf - src/mistralai_gcp/models/completionchunk.py: - id: 7fa670acf4b8 - last_write_checksum: sha1:0d0fdb8efda7f0b6a8ff376b7da94cac8060e4e2 - pristine_git_object: ca002f52239f69b96dd967b5e91cb4ed544e51d0 - src/mistralai_gcp/models/completionevent.py: - id: c25e6676e263 - last_write_checksum: sha1:528f13beedc9befc6fb71d4f9f2a2d4ff5e91817 - pristine_git_object: 33278c119c62205b8d9b09297066dc61c2a86cd1 - src/mistralai_gcp/models/completionresponsestreamchoice.py: - id: 46946832a23e - last_write_checksum: sha1:bc42569eb80dc034a1bde9170c35e6bc4ff52bb8 - pristine_git_object: ec9df52818fabf6bef33094bc7d25398066df3af - src/mistralai_gcp/models/contentchunk.py: - id: 96dd7160dff7 - last_write_checksum: sha1:484722b90615ca7af20993c570de79fe990a50f2 - pristine_git_object: da5671e348d363927af77188da6af07240398826 - src/mistralai_gcp/models/deltamessage.py: - id: db6c3c4d3384 - last_write_checksum: sha1:e596610fa0dd100203cd7e515750782bfbdb0445 - pristine_git_object: 1801ac76522df2efc362712d46262aeba95abc87 - src/mistralai_gcp/models/fimcompletionrequest.py: - id: ed8593c435af - last_write_checksum: sha1:6561263425e385568189ffc61e6b00034136adc3 - pristine_git_object: bcc97c90d4d327c83666423317dae2dc90db3b82 - src/mistralai_gcp/models/fimcompletionresponse.py: - id: 5f85a7cdb5fd - last_write_checksum: sha1:3ac2057157c7d1cb1bfc81fca2915ba72546f968 - pristine_git_object: e1940b0a2290fc3f9afcbd9e945397b1b90660ec - src/mistralai_gcp/models/fimcompletionstreamrequest.py: - id: f17c4f8fa580 - last_write_checksum: sha1:ebbe89e576d498070fde6b195d5afa2dc8bd5eac - pristine_git_object: 34d2ba65682b971f675f427cdf3aa6539071ce3a - src/mistralai_gcp/models/function.py: - id: 4612d6f83b9a - last_write_checksum: sha1:7692ea8102475e4d82d83722a8aea1efde668774 - pristine_git_object: 7ad1ae645f99ab13c022c92e7733ff4b15d39cac - src/mistralai_gcp/models/functioncall.py: - id: a3ca765a9368 - last_write_checksum: sha1:e044de5b26b15d46dce8ad8bd0d13bdf3d24ef7d - pristine_git_object: 
99554c8862922184a05074bf01f71fbe20ac8fea - src/mistralai_gcp/models/functionname.py: - id: f97eb2c1bae3 - last_write_checksum: sha1:6343e5b4f724db6088c2055b058a9ebdd9bda995 - pristine_git_object: 00ec22f5ca6ff2d68d5cce2a020846a672ab0a1b - src/mistralai_gcp/models/httpvalidationerror.py: - id: f1ac6b7c81f3 - last_write_checksum: sha1:8e98e27a5440e2e1dbe330d1c889d43919d90b51 - pristine_git_object: 79609351e675148ef074988bb6ea8a11b81087dc - src/mistralai_gcp/models/imageurl.py: - id: 1668e9d55730 - last_write_checksum: sha1:2b8eaac00c956beb87434f8d5a21dff12611c788 - pristine_git_object: 20d4ba7719a6c04d2c7864459a68cca808e1a3f2 - src/mistralai_gcp/models/imageurlchunk.py: - id: ebc4dfed0347 - last_write_checksum: sha1:5c625584449139a410138c9986323d1f86b52735 - pristine_git_object: ddb53f21a13aeed7884e213e92752de1870d9fb5 - src/mistralai_gcp/models/jsonschema.py: - id: 4c32e4fa593e - last_write_checksum: sha1:3c972f731f2bd92262ea04a65771c093254d3a5f - pristine_git_object: 26914b2f8562da07e2d54d68a5806bedd32ec16a - src/mistralai_gcp/models/mistralgcperror.py: - id: 690cf29f596b - last_write_checksum: sha1:0ec55c68e3daccf2aba3c52f0a7c77ad5102f4c9 - pristine_git_object: fec729a590b2ea981e01f4af99d8b36ba52b4664 - src/mistralai_gcp/models/mistralpromptmode.py: - id: d2ba58ed5184 - last_write_checksum: sha1:6fb8323de88682846a2a09e68550f3508a29f1f5 - pristine_git_object: a5cc534f8c53bc87b8451aac1b2a79e695530e71 - src/mistralai_gcp/models/no_response_error.py: - id: 7a773ba0687f + src/mistralai/gcp/client/models/__init__.py: + id: d9e976d01972 + last_write_checksum: sha1:f0554ff6b81286615330ffea947e619bc508bf19 + pristine_git_object: fb446c259f4ca1cc97ec64aac197f52b8224a096 + src/mistralai/gcp/client/models/assistantmessage.py: + id: d39c4bdd289e + last_write_checksum: sha1:08fa98315561d5bb2c094bf57e7d66639b86e3ee + pristine_git_object: 7061775b3dbd9be0b978ff2a2cb07e52c01fc80a + src/mistralai/gcp/client/models/chatcompletionchoice.py: + id: 8e65b56f3e6d + last_write_checksum: sha1:e6d1382e9f880b866130d900fd866997aaf80e45 + pristine_git_object: ae5a2fbf38afbd86233dcaa8aa1c8441f5ed9eba + src/mistralai/gcp/client/models/chatcompletionrequest.py: + id: 4694a31c0003 + last_write_checksum: sha1:edb744ec2baca1f9ba6574662fffb36fb7d3faab + pristine_git_object: 1bc039221910bf88396c96affe735c8ac822920b + src/mistralai/gcp/client/models/chatcompletionresponse.py: + id: dd9e4796fca9 + last_write_checksum: sha1:76d7257583389ff5021e320a8f9a45a6deb07c7c + pristine_git_object: 317c4d84e378c14294d58c5aefd8c55ffe28754a + src/mistralai/gcp/client/models/chatcompletionstreamrequest.py: + id: 7294862af8ea + last_write_checksum: sha1:75d5bfcc204339b152dc78e33ac449c3aa9b5432 + pristine_git_object: 0a5a0021a4862e7b92a5c31679bf42bfa704d15b + src/mistralai/gcp/client/models/completionchunk.py: + id: 6b9ed8c30877 + last_write_checksum: sha1:4afc07c1824d81640f52a5c8bf89fde8893269b9 + pristine_git_object: 9e54cb6dfaccf7f815b40be585e11585cb5fef78 + src/mistralai/gcp/client/models/completionevent.py: + id: 3f55c4b8fc75 + last_write_checksum: sha1:66665d921fd27df6ef0efce996a5446e49b989d8 + pristine_git_object: bb1550093ce9adcb9bcd0548b69796e82f4f260b + src/mistralai/gcp/client/models/completionresponsestreamchoice.py: + id: ad9b98ca7e1c + last_write_checksum: sha1:04d195584fe4ea16544685e9989e5ae35205179a + pristine_git_object: 6f306721fbe47780c778833b80e97ab5d25d8367 + src/mistralai/gcp/client/models/contentchunk.py: + id: 8714d3bf2698 + last_write_checksum: sha1:347f43b4d7dcab18e09e6c3323f745a25ecfb04c + pristine_git_object: 
1cd9e502ab7d4860daa79f907beafa71da086ab3 + src/mistralai/gcp/client/models/deltamessage.py: + id: 404fc85f1a4c + last_write_checksum: sha1:3375624531d12279d225fb07a68e0396483b962f + pristine_git_object: 96923518438137cb729a69149b5b99be49836ad7 + src/mistralai/gcp/client/models/fimcompletionrequest.py: + id: 5b79e2595d31 + last_write_checksum: sha1:cc4fa68c60a6a500a9887e47dd2e9220327c6226 + pristine_git_object: f37bbcc3cab020224531da898dd99cc175d49cd9 + src/mistralai/gcp/client/models/fimcompletionresponse.py: + id: 402f602d29b8 + last_write_checksum: sha1:cfe26848c7b14d6e374b7944d7ad44df822990b0 + pristine_git_object: 5b80da3f03e4e99dfca971a53af1cf6472c889bb + src/mistralai/gcp/client/models/fimcompletionstreamrequest.py: + id: 31190cf25070 + last_write_checksum: sha1:720f0a039a62cb508d513475a0e4bad45a9aa03c + pristine_git_object: 8e6102612998bde70d830bb0b8ee3a5e2a4dd01e + src/mistralai/gcp/client/models/function.py: + id: 2285a899b32e + last_write_checksum: sha1:a69ad9c8cd723e78a3949deefe43bcbf57426916 + pristine_git_object: 28577eff06d052aeb58c2795dd0a92ae4f2e7552 + src/mistralai/gcp/client/models/functioncall.py: + id: 17bb51f08e5f + last_write_checksum: sha1:b5fe2f061ea5f47057ee50011babc80de27e0ee6 + pristine_git_object: 0f1b24251ce728b3c2a0fb9e9ca94f90a9c3b7be + src/mistralai/gcp/client/models/functionname.py: + id: 313a6001145f + last_write_checksum: sha1:fe1eefaed314efa788bd15beb63bf6b81abb307e + pristine_git_object: 585b9e39762e49356823e211ad86f701bca389b8 + src/mistralai/gcp/client/models/httpvalidationerror.py: + id: bdb67f678798 + last_write_checksum: sha1:58b6b7a2b2f8e4f66fc14c38540a26cfd2541a1e + pristine_git_object: 57df72607adc980b061d092f77140c6dbd36ecec + src/mistralai/gcp/client/models/imagedetail.py: + id: a28b2f3e2cb5 + last_write_checksum: sha1:a4874529961952019eaa86a2fa0989626f537a4c + pristine_git_object: 68ed76080716eb1424b13f182479f57e51a4fabf + src/mistralai/gcp/client/models/imageurl.py: + id: 4e330f3eae74 + last_write_checksum: sha1:3c5d70c0698b1b4b9c99087241227bab3dc0cdbf + pristine_git_object: d4f298f12d8095590cded5714091596b505c59b1 + src/mistralai/gcp/client/models/imageurlchunk.py: + id: e68a4a393e9b + last_write_checksum: sha1:2eb2c8a205e5f8b320e2f597075cad9e5e27475b + pristine_git_object: fc5284c102c17a33c1ba6029c87515d509cd014b + src/mistralai/gcp/client/models/jsonschema.py: + id: 39c6e7d412a0 + last_write_checksum: sha1:29ba87457959588ff7d8188ae2382fb88740151d + pristine_git_object: 443c429dd1461d7a6817335626cd585577c5bffe + src/mistralai/gcp/client/models/mistralgcperror.py: + id: 278d296220ff + last_write_checksum: sha1:7267c829a842a94c5b84ac248a1610ce45f3db4e + pristine_git_object: 9de91bf2a4abf8b0d0922eb6062fe2ab817a8aee + src/mistralai/gcp/client/models/mistralpromptmode.py: + id: 8be4a4a683e4 + last_write_checksum: sha1:c958567e95490abf3941fde69be69733e8afb90e + pristine_git_object: c765e4f1a0b86735255771231377f13d62f3d7a6 + src/mistralai/gcp/client/models/no_response_error.py: + id: 2a7fa173594b last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 - src/mistralai_gcp/models/prediction.py: - id: cd3b43190e22 - last_write_checksum: sha1:a0411a8e3b1ecb42b91405dd9ee2a2ee5f3fad59 - pristine_git_object: 36c87ab046ed9f1a28a371fbdc5c7d584d71b6d3 - src/mistralai_gcp/models/referencechunk.py: - id: ee00a52fb6dd - last_write_checksum: sha1:d0c05b6b1e7d085833d4a9ef85f1e0088c86d3a5 - pristine_git_object: 904e8b8250570371e2b59895196986a45e6d3562 - 
src/mistralai_gcp/models/responseformat.py: - id: ad17dac36a51 - last_write_checksum: sha1:296d4b52f934c48490b71d85e1e9d0e207cee21a - pristine_git_object: 9fe5116ca46d713f5f23c92ec1de8a73c5124408 - src/mistralai_gcp/models/responseformats.py: - id: deb9c36c5ec5 + src/mistralai/gcp/client/models/prediction.py: + id: 7a5463285bc8 + last_write_checksum: sha1:1d1e81082d1c2bfd613f0bc00f7173995ad67c0c + pristine_git_object: f53579edc665dd7fc1cc2497b0cd05b69e541cd8 + src/mistralai/gcp/client/models/referencechunk.py: + id: 523e477f8725 + last_write_checksum: sha1:d29c5fc1d8b6850fdeb3abc7f83185de92571b23 + pristine_git_object: 274ea7f7b142714d96040428fe7b87eeb48432cb + src/mistralai/gcp/client/models/responseformat.py: + id: 06774bb65b42 + last_write_checksum: sha1:a52a60dc45c0b0939b99754d6c0c603ef2f737d3 + pristine_git_object: 34ae6b039a6c83c603fc6d47f6b2f233ec6c817a + src/mistralai/gcp/client/models/responseformats.py: + id: 18112ad0f6db last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai_gcp/models/responsevalidationerror.py: - id: 78e210042d35 - last_write_checksum: sha1:b8ba70238453017393e721c7d61b5f1e268d7c17 - pristine_git_object: ebd4f214747d451dc2733d6ea838c67bb0c84797 - src/mistralai_gcp/models/sdkerror.py: - id: beed68eccaa1 - last_write_checksum: sha1:a058f2519ec22f72457e800600be469f13ff9915 - pristine_git_object: 7f53bbcd548d15f4fdd529bd3caea5249eb5e8e6 - src/mistralai_gcp/models/security.py: - id: 32f877bd1399 - last_write_checksum: sha1:7bad1150440143f9f6faefe33911edf6c2afdec1 - pristine_git_object: 3857494264c0444d330c54570483710a5ed321f0 - src/mistralai_gcp/models/systemmessage.py: - id: 13826cd6cb74 - last_write_checksum: sha1:876e84816c4e27ad77d6313777ba841ea3086cf9 - pristine_git_object: d74bdf3255bac53335eea08a6010cf1cc19380dd - src/mistralai_gcp/models/systemmessagecontentchunks.py: - id: 8233735d37db - last_write_checksum: sha1:38fedfdb83824054a1734bcc7d39e7e040bf4792 - pristine_git_object: e0b5bbc30828cbf572e603efc86ee2695102ea31 - src/mistralai_gcp/models/textchunk.py: - id: a330626b2006 - last_write_checksum: sha1:b801cf9b1913a70841c8fbdc9d433f0380ea82d8 - pristine_git_object: c4a8cf28cd2281cfda40cefa70ce1bd64d3e750d - src/mistralai_gcp/models/thinkchunk.py: - id: c38f6a213cc1 - last_write_checksum: sha1:a072f3bf01c2dc90ef6cc1b188b2e00e15923e07 - pristine_git_object: b88c0cb54c6926b3c896b3c192c5f3c51c676a51 - src/mistralai_gcp/models/tool.py: - id: 86b94d6a3bcb - last_write_checksum: sha1:14a081eb8639d834a7c4f209a79e7d1270202205 - pristine_git_object: 800de633962a0ccddab52596eae542318bb491b0 - src/mistralai_gcp/models/toolcall.py: - id: 3047e78c2ac3 - last_write_checksum: sha1:d219f8d7de19f501b799caf0e232bdce95e2c891 - pristine_git_object: 23ef157aa1d24498805a489a8cebf3c0e257d919 - src/mistralai_gcp/models/toolchoice.py: - id: 1f3d5233426e - last_write_checksum: sha1:936b1ac7b44bc1bf357e6a66cc42ed0127ad015e - pristine_git_object: 4a1483305f606afcc704e8d51ae363468354849e - src/mistralai_gcp/models/toolchoiceenum.py: - id: b4431b9cf3fd + src/mistralai/gcp/client/models/responsevalidationerror.py: + id: b90c1c09ac00 + last_write_checksum: sha1:e4321c1141ba7b1f6a8c217124e02ea0c70d9ad1 + pristine_git_object: 0e86ea6cb79fd4598d527dfef403ba66d435d3bb + src/mistralai/gcp/client/models/sdkerror.py: + id: a7cf4fa8974b + last_write_checksum: sha1:a3b60234deceb7fbcb57926c265e02e9fefc0835 + pristine_git_object: 00bc1d99353e7e2415d92c3e906c2c09712e5a64 + 
src/mistralai/gcp/client/models/security.py: + id: 7e13bda8273b + last_write_checksum: sha1:7086e929823d4eefe80cc279b605adfc8bbb08aa + pristine_git_object: 10a469b54d5e03873fb7d7d98627f2376c93d484 + src/mistralai/gcp/client/models/systemmessage.py: + id: 6537664d2d1b + last_write_checksum: sha1:e7f8dc73154c6985fcdbb77259df9bbc4745f976 + pristine_git_object: a7d695a7791eb5e97cd8f74e81c475c78e4b1a67 + src/mistralai/gcp/client/models/systemmessagecontentchunks.py: + id: e120a6469c89 + last_write_checksum: sha1:55529f2f29ba3087fbf117dbbe64e1dda92b2958 + pristine_git_object: 225f38b712f5f3c7abfd526cc8c0386687814f36 + src/mistralai/gcp/client/models/textchunk.py: + id: a134f120d4dc + last_write_checksum: sha1:9f46381e01f235560017ea80fbc85210eb625a99 + pristine_git_object: 77576c9fd87f0861bf6a3496aeae7e8bb8dc986a + src/mistralai/gcp/client/models/thinkchunk.py: + id: 59a1d1ef2020 + last_write_checksum: sha1:9fcccb19d87bc41f771cae710eeb8f28c229070d + pristine_git_object: b65fffb21d5cb060acaa648a70e337a43595cd32 + src/mistralai/gcp/client/models/tool.py: + id: 4b27d45e56ad + last_write_checksum: sha1:6d139575b740ea1f9f68a73b7bc2c95c30a10345 + pristine_git_object: d09c68542f2cb1f3bae0ffc7a7b163ad08a8e973 + src/mistralai/gcp/client/models/toolcall.py: + id: e6c25869a579 + last_write_checksum: sha1:5acf0eca8b1f4c459c6d8cadbbbd90605201ddc0 + pristine_git_object: a1edf3370426957980ff212367d56909ea8fa548 + src/mistralai/gcp/client/models/toolchoice.py: + id: cb13a9f64c92 + last_write_checksum: sha1:3ad6b48b24b39609e86229193ad18d84b1b3c818 + pristine_git_object: de3828dac8bc23e32b9f9434adccc770b5ce1212 + src/mistralai/gcp/client/models/toolchoiceenum.py: + id: d62e9c92d93c last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 - src/mistralai_gcp/models/toolmessage.py: - id: e21a2326eb31 - last_write_checksum: sha1:c332f72e272fff7970f52e2b15223a2898ce9b15 - pristine_git_object: d6aa2621b83bde261fab7bd15f58273861f88738 - src/mistralai_gcp/models/tooltypes.py: - id: b4c1716d51b3 - last_write_checksum: sha1:0f8fe0c437736eb584cce298a5e72c4e25f7c42b - pristine_git_object: 8b812ae0cfee81a1cd8ab0180e65f57d19a0dcbd - src/mistralai_gcp/models/usageinfo.py: - id: 574d1999c265 - last_write_checksum: sha1:a0a88fe5b3cae9317781b99cb3cc1916a9ba17cc - pristine_git_object: 59f36158761c3a86900256a6ed73845c455417c7 - src/mistralai_gcp/models/usermessage.py: - id: cf3691ffafa6 - last_write_checksum: sha1:d0ed86a67403d65ed6ac7a31aa5f73e19ecfa670 - pristine_git_object: 0168b45235bc891888c095565af832535dd26139 - src/mistralai_gcp/models/validationerror.py: - id: f1a6468621bd - last_write_checksum: sha1:a4cc5969f12e00be3506edc90ec21a01d5415eff - pristine_git_object: 033d4b63d1c321ae2c49e8684b34817adddca4c2 - src/mistralai_gcp/py.typed: - id: 7f25f97fed44 + src/mistralai/gcp/client/models/toolmessage.py: + id: b3774786c2e9 + last_write_checksum: sha1:ef21eb555f41ec70010dbcea1a155af988936061 + pristine_git_object: 65b1d9d62d37361a06b3fd3ee1790eb3a976a94f + src/mistralai/gcp/client/models/tooltypes.py: + id: 5926c64f5229 + last_write_checksum: sha1:ffd576511eed9f823c3d67df9fc5574d8d53c54b + pristine_git_object: fd1aa13d7b8c5d9bdb0922e04b8bd653ff843f60 + src/mistralai/gcp/client/models/usageinfo.py: + id: 3aab1af66cff + last_write_checksum: sha1:47c6311bc1db47849a72c8e1bcc64dac9cec637e + pristine_git_object: 9b7207b10ea9d46d8216c104c45be1a52fb093d9 + src/mistralai/gcp/client/models/usermessage.py: + id: 9cfa7260463e + last_write_checksum: 
sha1:580acf868a3d180eef34b2af9c2d20f78e4fb693 + pristine_git_object: c083e16d4aa536beec9f9e1151ebbe8c1797798c + src/mistralai/gcp/client/models/validationerror.py: + id: 6b4f4910ea9c + last_write_checksum: sha1:2792fd656f55519902f37670fb9fb3b43b4aa016 + pristine_git_object: 2d330e9acb579cc4928fa27fdd72288ce8832b8b + src/mistralai/gcp/client/py.typed: + id: 98b8ab80ab0d last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 - src/mistralai_gcp/sdkconfiguration.py: - id: 84fd7d3e219a - last_write_checksum: sha1:df51450c87f807c849e2aefb0a154aa4426fd8e3 - pristine_git_object: cf85c47e5e33956a64ddea53d85cdb7cc4bb687e - src/mistralai_gcp/types/__init__.py: - id: 15a92fdbd0a1 + src/mistralai/gcp/client/sdkconfiguration.py: + id: 57be0f79ea1e + last_write_checksum: sha1:0c5905e7c6092f57c15ee4318a85c0985bcc1ccf + pristine_git_object: d56a634f688f6697ba84962381084dc2d0836ac9 + src/mistralai/gcp/client/types/__init__.py: + id: f7ef15ac2ba1 last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c - src/mistralai_gcp/types/basemodel.py: - id: 0dd6dc277359 + src/mistralai/gcp/client/types/basemodel.py: + id: 24babf758c19 last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee - src/mistralai_gcp/utils/__init__.py: - id: bb44726e5fa4 + src/mistralai/gcp/client/utils/__init__.py: + id: a30c8ff6dcff last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce - src/mistralai_gcp/utils/annotations.py: - id: aeecca0c40a3 + src/mistralai/gcp/client/utils/annotations.py: + id: 9b2cd4ffc6e9 last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 - src/mistralai_gcp/utils/datetimes.py: - id: e3e3bb6cb264 + src/mistralai/gcp/client/utils/datetimes.py: + id: dd1f0f91ea9d last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 - src/mistralai_gcp/utils/enums.py: - id: 9f020fc8d361 + src/mistralai/gcp/client/utils/enums.py: + id: 2341407d5443 last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 - src/mistralai_gcp/utils/eventstreaming.py: - id: d570df9074cf + src/mistralai/gcp/client/utils/eventstreaming.py: + id: bb66f0c3e0dc last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc - src/mistralai_gcp/utils/forms.py: - id: fe642748c385 + src/mistralai/gcp/client/utils/forms.py: + id: ebf34781d6bd last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 - src/mistralai_gcp/utils/headers.py: - id: 0cb933d098ed + src/mistralai/gcp/client/utils/headers.py: + id: 4c369582903e last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a - src/mistralai_gcp/utils/logger.py: - id: 2992f9bda9c7 + src/mistralai/gcp/client/utils/logger.py: + id: 082d86b60820 last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 - src/mistralai_gcp/utils/metadata.py: - id: af274ae68c93 + src/mistralai/gcp/client/utils/metadata.py: + id: ff0e832b8b9c 
last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d - src/mistralai_gcp/utils/queryparams.py: - id: b20aa8da5982 + src/mistralai/gcp/client/utils/queryparams.py: + id: 133b8408e73e last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 - src/mistralai_gcp/utils/requestbodies.py: - id: 1a2ddaa8f5a2 + src/mistralai/gcp/client/utils/requestbodies.py: + id: 1be13a660954 last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c - src/mistralai_gcp/utils/retries.py: - id: 8caeba1fe4ab + src/mistralai/gcp/client/utils/retries.py: + id: 542ebd75b79b last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 - src/mistralai_gcp/utils/security.py: - id: fa4f52aaad5d + src/mistralai/gcp/client/utils/security.py: + id: 5273152365f4 last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 - src/mistralai_gcp/utils/serializers.py: - id: 920ccb5c87f2 + src/mistralai/gcp/client/utils/serializers.py: + id: a7836e553d41 last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 - src/mistralai_gcp/utils/unmarshal_json_response.py: - id: 65d5fa644cf8 - last_write_checksum: sha1:877dd4bb58700039a481fdf7d7216d2d9a0b3e92 - pristine_git_object: c168a293f7018fc3b83cac0d8f723475e5f05631 - src/mistralai_gcp/utils/url.py: - id: 116eb5a78ca7 + src/mistralai/gcp/client/utils/unmarshal_json_response.py: + id: d972d22cf934 + last_write_checksum: sha1:a68b9e491188e6c1956a749530eac3c7dc8004e7 + pristine_git_object: 83e8275e59adf51fb01a0579ae26627ee29fee49 + src/mistralai/gcp/client/utils/url.py: + id: 0d311bbcb8f8 last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 - src/mistralai_gcp/utils/values.py: - id: 9cc9ee47c951 + src/mistralai/gcp/client/utils/values.py: + id: 328207e9ae81 last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "422": application/json: {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -661,7 +638,7 @@ examples: application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"role": "assistant", "content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} examplesVersion: 1.0.2 generatedTests: {} generatedFiles: diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml similarity index 78% rename from packages/mistralai_gcp/.speakeasy/gen.yaml rename to packages/gcp/.speakeasy/gen.yaml index 2aacaa62..93cc5a42 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/gcp/.speakeasy/gen.yaml @@ -8,11 +8,13 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false + nameResolutionFeb2025: true parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false - sharedErrorComponentsApr2025: false + securityFeb2025: true + sharedErrorComponentsApr2025: true + methodSignaturesApr2024: true + sharedNestedComponentsJan2026: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -26,21 +28,18 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.8.0 + version: 2.0.0a4 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 - main: - google-auth: ^2.31.0 - requests: ^2.32.3 allowedRedefinedBuiltins: - id - object asyncMode: both authors: - Mistral - baseErrorName: MistralGcpError + baseErrorName: MistralGCPError clientServerStatusCodesAsErrors: true constFieldCasing: upper defaultErrorName: SDKError @@ -48,10 +47,13 @@ python: enableCustomCodeRegions: false enumFormat: union fixFlags: - responseRequiredSep2024: false + responseRequiredSep2024: true + flatAdditionalProperties: true flattenGlobalSecurity: true flattenRequests: true flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + 
forwardCompatibleUnionsByDefault: tagged-only imports: option: openapi paths: @@ -65,12 +67,12 @@ python: license: "" maxMethodParams: 15 methodArguments: infer-optional-args - moduleName: "" + moduleName: mistralai.gcp.client multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv packageName: mistralai-gcp - preApplyUnionDiscriminators: false + preApplyUnionDiscriminators: true pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat diff --git a/packages/mistralai_gcp/CONTRIBUTING.md b/packages/gcp/CONTRIBUTING.md similarity index 100% rename from packages/mistralai_gcp/CONTRIBUTING.md rename to packages/gcp/CONTRIBUTING.md diff --git a/packages/mistralai_gcp/README.md b/packages/gcp/README.md similarity index 70% rename from packages/mistralai_gcp/README.md rename to packages/gcp/README.md index a4233244..5b66766b 100644 --- a/packages/mistralai_gcp/README.md +++ b/packages/gcp/README.md @@ -26,47 +26,58 @@ pip install mistralai[gcp] This example shows how to create chat completions. +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + ```python # Synchronous Example -from mistralai_gcp import MistralGCP import os -) +from mistralai.gcp.client import MistralGCP +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, -], model="mistral-small-latest") +], model="mistral-small-2503") if res is not None: # handle response - pass + print(res.choices[0].message.content) ```
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP async def main(): + # The SDK auto-detects credentials and builds the Vertex AI URL s = MistralGCP( - api_key=os.getenv("API_KEY", ""), + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), ) res = await s.chat.complete_async(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], model="mistral-small-latest") + ], model="mistral-small-2503") if res is not None: # handle response - pass + print(res.choices[0].message.content) asyncio.run(main()) ``` @@ -78,12 +89,12 @@ asyncio.run(main()) ### [chat](docs/sdks/chat/README.md) * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -* [create](docs/sdks/chat/README.md#create) - Chat Completion +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion ### [fim](docs/sdks/fim/README.md) * [stream](docs/sdks/fim/README.md#stream) - Stream fim completion -* [create](docs/sdks/fim/README.md#create) - Fim Completion +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion @@ -96,18 +107,21 @@ terminate when the server no longer has any events to send and closes the underlying connection. ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, -], model="mistral-small-latest") +], model="mistral-small-2503") if res is not None: for event in res: @@ -127,21 +141,24 @@ Some of the endpoints in this SDK support retries. If you use the SDK without an To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: ```python -from mistralai_gcp import MistralGCP -from mistralgcp.utils import BackoffStrategy, RetryConfig import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.utils import BackoffStrategy, RetryConfig -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, - ], - model="mistral-small-latest", + ], + model="mistral-small-2503", retries=RetryConfig( "backoff", BackoffStrategy(1, 50, 1.1, 100), @@ -158,23 +175,25 @@ if res is not None: If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: ```python -from mistralai_gcp import MistralGCP -from mistralgcp.utils import BackoffStrategy, RetryConfig import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.utils import BackoffStrategy, RetryConfig +# The SDK auto-detects credentials and builds the Vertex AI URL s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region=os.environ.get("GCP_REGION", "us-central1"), retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), ) - res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], - model="mistral-small-latest" + model="mistral-small-2503", ) if res is not None: @@ -188,7 +207,7 @@ if res is not None: ## Error Handling -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. | Error Object | Status Code | Content Type | | -------------------------- | ----------- | ---------------- | @@ -198,21 +217,26 @@ Handling errors in this SDK should largely match your expectations. All operati ### Example ```python -from mistralai_gcp import MistralGCP, models import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client import models -s = MistralGCP() +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region=os.environ.get("GCP_REGION", "us-central1"), +) res = None try: res = s.chat.complete( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], - model="mistral-small-latest" + model="mistral-small-2503", ) except models.HTTPValidationError as e: @@ -232,61 +256,27 @@ if res is not None: ## Server Selection -### Select Server by Name - -You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: - -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | - -#### Example - -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP(server="prod") - - -res = s.chat.stream( - messages=[ - { - "content": "Who is the best French painter? 
Answer in one short sentence.", - "role": "user", - }, - ], - model="mistral-small-latest" -) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - ### Override Server URL Per-Client -The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: +The SDK automatically constructs the Vertex AI endpoint from `project_id` and `region`: ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP +# The SDK auto-detects credentials and builds the Vertex AI URL s = MistralGCP( - server_url="https://api.mistral.ai", + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), ) - res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="mistral-small-latest" + ], + model="mistral-small-2503", ) if res is not None: @@ -306,17 +296,23 @@ This allows you to wrap the client with your own custom logic, such as adding cu For example, you could specify a header for every request that this sdk makes as follows: ```python -from mistralai_gcp import MistralGCP +import os +from mistralai.gcp.client import MistralGCP import httpx http_client = httpx.Client(headers={"x-custom-header": "someValue"}) -s = MistralGCP(client=http_client) +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region="us-central1", + client=http_client, +) ``` or you could wrap the client with your own custom logic: ```python -from mistralai_gcp import MistralGCP -from mistralai_gcp.httpclient import AsyncHttpClient +from typing import Any, Optional, Union +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.httpclient import AsyncHttpClient import httpx class CustomClient(AsyncHttpClient): @@ -374,7 +370,11 @@ class CustomClient(AsyncHttpClient): extensions=extensions, ) -s = MistralGCP(async_client=CustomClient(httpx.AsyncClient())) +s = MistralGCP( + project_id="", + region="us-central1", + async_client=CustomClient(httpx.AsyncClient()), +) ``` @@ -389,22 +389,25 @@ This SDK supports the following security scheme globally: | --------- | ---- | ----------- | | `api_key` | http | HTTP Bearer | -To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: +The SDK automatically handles GCP authentication via `google.auth.default()`. Tokens are auto-refreshed when they expire. For example: ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="mistral-small-latest" + ], + model="mistral-small-2503", ) if res is not None: @@ -421,5 +424,5 @@ if res is not None: ## Contributions -While we value open-source contributions to this SDK, this library is generated programmatically. 
Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. diff --git a/packages/mistralai_gcp/RELEASES.md b/packages/gcp/RELEASES.md similarity index 100% rename from packages/mistralai_gcp/RELEASES.md rename to packages/gcp/RELEASES.md diff --git a/packages/gcp/USAGE.md b/packages/gcp/USAGE.md new file mode 100644 index 00000000..3156349d --- /dev/null +++ b/packages/gcp/USAGE.md @@ -0,0 +1,61 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + +```python +# Synchronous Example +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.gcp.client import MistralGCP + +async def main(): + # The SDK auto-detects credentials and builds the Vertex AI URL + s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model="mistral-small-2503") + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + diff --git a/packages/mistralai_gcp/docs/models/arguments.md b/packages/gcp/docs/models/arguments.md similarity index 100% rename from packages/mistralai_gcp/docs/models/arguments.md rename to packages/gcp/docs/models/arguments.md diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/gcp/docs/models/assistantmessage.md similarity index 95% rename from packages/mistralai_gcp/docs/models/assistantmessage.md rename to packages/gcp/docs/models/assistantmessage.md index 3d0bd90b..9ef63837 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/gcp/docs/models/assistantmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | | `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagecontent.md b/packages/gcp/docs/models/assistantmessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/assistantmessagecontent.md rename to packages/gcp/docs/models/assistantmessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md b/packages/gcp/docs/models/chatcompletionchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionchoice.md rename to packages/gcp/docs/models/chatcompletionchoice.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md b/packages/gcp/docs/models/chatcompletionchoicefinishreason.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md rename to packages/gcp/docs/models/chatcompletionchoicefinishreason.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/gcp/docs/models/chatcompletionrequest.md similarity index 99% rename from packages/mistralai_gcp/docs/models/chatcompletionrequest.md rename to packages/gcp/docs/models/chatcompletionrequest.md index 6886f9dc..61a25d86 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/gcp/docs/models/chatcompletionrequest.md @@ -13,7 +13,7 @@ | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/packages/mistralai_gcp/docs/models/messages.md b/packages/gcp/docs/models/chatcompletionrequestmessage.md similarity index 92% rename from packages/mistralai_gcp/docs/models/messages.md rename to packages/gcp/docs/models/chatcompletionrequestmessage.md index 1d394500..91e9e062 100644 --- a/packages/mistralai_gcp/docs/models/messages.md +++ b/packages/gcp/docs/models/chatcompletionrequestmessage.md @@ -1,4 +1,4 @@ -# Messages +# ChatCompletionRequestMessage ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md b/packages/gcp/docs/models/chatcompletionrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md rename to packages/gcp/docs/models/chatcompletionrequeststop.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/gcp/docs/models/chatcompletionrequesttoolchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md rename to packages/gcp/docs/models/chatcompletionrequesttoolchoice.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/gcp/docs/models/chatcompletionresponse.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionresponse.md rename to packages/gcp/docs/models/chatcompletionresponse.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/gcp/docs/models/chatcompletionstreamrequest.md similarity index 99% rename from packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md rename to packages/gcp/docs/models/chatcompletionstreamrequest.md index ff1940dd..3e790e7d 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/gcp/docs/models/chatcompletionstreamrequest.md @@ -10,10 +10,10 @@ | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected, or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md b/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md similarity index 91% rename from packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md rename to packages/gcp/docs/models/chatcompletionstreamrequestmessage.md index bc7708a6..2e4e93ac 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md +++ b/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md @@ -1,4 +1,4 @@ -# ChatCompletionRequestMessages +# ChatCompletionStreamRequestMessage ## Supported Types diff --git a/packages/mistralai_azure/docs/models/stop.md b/packages/gcp/docs/models/chatcompletionstreamrequeststop.md similarity index 88% rename from packages/mistralai_azure/docs/models/stop.md rename to packages/gcp/docs/models/chatcompletionstreamrequeststop.md index ba40ca83..a48460a9 100644 --- a/packages/mistralai_azure/docs/models/stop.md +++ b/packages/gcp/docs/models/chatcompletionstreamrequeststop.md @@ -1,4 +1,4 @@ -# Stop +# ChatCompletionStreamRequestStop Stop generation if this token is detected. 
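The renamed `ChatCompletionStreamRequestStop` documented above is a union of a single string and a list of strings. A short sketch under the same assumption as the previous example (top-level `Mistral` client, dict-style arguments):

```python
import os

from mistralai import Mistral  # same top-level-SDK assumption as the sketch above

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# stop accepts Union[str, List[str]]: generation halts at whichever token appears first.
stream = client.chat.stream(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Count upward from 1, one number per line."}],
    stop=["10", "\n\n"],
)
```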
Or if one of these tokens is detected when providing an array diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md rename to packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/gcp/docs/models/completionchunk.md similarity index 100% rename from packages/mistralai_gcp/docs/models/completionchunk.md rename to packages/gcp/docs/models/completionchunk.md diff --git a/packages/mistralai_gcp/docs/models/completionevent.md b/packages/gcp/docs/models/completionevent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/completionevent.md rename to packages/gcp/docs/models/completionevent.md diff --git a/packages/gcp/docs/models/completionresponsestreamchoice.md b/packages/gcp/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/packages/gcp/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/finishreason.md b/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md similarity index 81% rename from packages/mistralai_azure/docs/models/finishreason.md rename to packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md index 45a5aedb..0fece473 100644 --- a/packages/mistralai_azure/docs/models/finishreason.md +++ b/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md @@ -1,4 +1,4 @@ -# FinishReason +# CompletionResponseStreamChoiceFinishReason ## Values diff --git a/packages/mistralai_gcp/docs/models/contentchunk.md b/packages/gcp/docs/models/contentchunk.md similarity index 100% rename from packages/mistralai_gcp/docs/models/contentchunk.md rename to packages/gcp/docs/models/contentchunk.md diff --git a/packages/gcp/docs/models/deltamessage.md b/packages/gcp/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/packages/gcp/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` 
| *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/content.md b/packages/gcp/docs/models/deltamessagecontent.md similarity index 89% rename from packages/mistralai_azure/docs/models/content.md rename to packages/gcp/docs/models/deltamessagecontent.md index a833dc2c..8142772d 100644 --- a/packages/mistralai_azure/docs/models/content.md +++ b/packages/gcp/docs/models/deltamessagecontent.md @@ -1,4 +1,4 @@ -# Content +# DeltaMessageContent ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/gcp/docs/models/fimcompletionrequest.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionrequest.md rename to packages/gcp/docs/models/fimcompletionrequest.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md b/packages/gcp/docs/models/fimcompletionrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md rename to packages/gcp/docs/models/fimcompletionrequeststop.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/gcp/docs/models/fimcompletionresponse.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionresponse.md rename to packages/gcp/docs/models/fimcompletionresponse.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/gcp/docs/models/fimcompletionstreamrequest.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md rename to packages/gcp/docs/models/fimcompletionstreamrequest.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md b/packages/gcp/docs/models/fimcompletionstreamrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md rename to packages/gcp/docs/models/fimcompletionstreamrequeststop.md diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/gcp/docs/models/function.md similarity index 100% rename from packages/mistralai_gcp/docs/models/function.md rename to packages/gcp/docs/models/function.md diff --git a/packages/mistralai_gcp/docs/models/functioncall.md b/packages/gcp/docs/models/functioncall.md similarity index 100% rename from packages/mistralai_gcp/docs/models/functioncall.md rename to packages/gcp/docs/models/functioncall.md diff --git a/packages/mistralai_gcp/docs/models/functionname.md b/packages/gcp/docs/models/functionname.md similarity index 100% rename from packages/mistralai_gcp/docs/models/functionname.md rename to packages/gcp/docs/models/functionname.md diff --git a/packages/mistralai_gcp/docs/models/httpvalidationerror.md b/packages/gcp/docs/models/httpvalidationerror.md similarity index 100% rename from packages/mistralai_gcp/docs/models/httpvalidationerror.md rename to packages/gcp/docs/models/httpvalidationerror.md diff --git a/packages/gcp/docs/models/imagedetail.md b/packages/gcp/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/packages/gcp/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No 
newline at end of file diff --git a/packages/gcp/docs/models/imageurl.md b/packages/gcp/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- /dev/null +++ b/packages/gcp/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/gcp/docs/models/imageurlchunk.md b/packages/gcp/docs/models/imageurlchunk.md new file mode 100644 index 00000000..a84dac32 --- /dev/null +++ b/packages/gcp/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Literal["image_url"]* | :heavy_check_mark: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md b/packages/gcp/docs/models/imageurlunion.md similarity index 86% rename from packages/mistralai_azure/docs/models/imageurlchunkimageurl.md rename to packages/gcp/docs/models/imageurlunion.md index 76738908..db97130f 100644 --- a/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md +++ b/packages/gcp/docs/models/imageurlunion.md @@ -1,4 +1,4 @@ -# ImageURLChunkImageURL +# ImageURLUnion ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/gcp/docs/models/jsonschema.md similarity index 100% rename from packages/mistralai_gcp/docs/models/jsonschema.md rename to packages/gcp/docs/models/jsonschema.md diff --git a/packages/mistralai_gcp/docs/models/loc.md b/packages/gcp/docs/models/loc.md similarity index 100% rename from packages/mistralai_gcp/docs/models/loc.md rename to packages/gcp/docs/models/loc.md diff --git a/packages/mistralai_gcp/docs/models/mistralpromptmode.md b/packages/gcp/docs/models/mistralpromptmode.md similarity index 100% rename from packages/mistralai_gcp/docs/models/mistralpromptmode.md rename to packages/gcp/docs/models/mistralpromptmode.md diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/gcp/docs/models/prediction.md similarity index 100% rename from packages/mistralai_gcp/docs/models/prediction.md rename to packages/gcp/docs/models/prediction.md diff --git a/packages/gcp/docs/models/referencechunk.md b/packages/gcp/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/packages/gcp/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No 
newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/gcp/docs/models/responseformat.md similarity index 100% rename from packages/mistralai_gcp/docs/models/responseformat.md rename to packages/gcp/docs/models/responseformat.md diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/gcp/docs/models/responseformats.md similarity index 100% rename from packages/mistralai_gcp/docs/models/responseformats.md rename to packages/gcp/docs/models/responseformats.md diff --git a/packages/mistralai_gcp/docs/models/security.md b/packages/gcp/docs/models/security.md similarity index 100% rename from packages/mistralai_gcp/docs/models/security.md rename to packages/gcp/docs/models/security.md diff --git a/packages/mistralai_azure/docs/models/systemmessage.md b/packages/gcp/docs/models/systemmessage.md similarity index 88% rename from packages/mistralai_azure/docs/models/systemmessage.md rename to packages/gcp/docs/models/systemmessage.md index 0dba71c0..10bda10f 100644 --- a/packages/mistralai_azure/docs/models/systemmessage.md +++ b/packages/gcp/docs/models/systemmessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontent.md b/packages/gcp/docs/models/systemmessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/systemmessagecontent.md rename to packages/gcp/docs/models/systemmessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md b/packages/gcp/docs/models/systemmessagecontentchunks.md similarity index 100% rename from packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md rename to packages/gcp/docs/models/systemmessagecontentchunks.md diff --git a/packages/gcp/docs/models/textchunk.md b/packages/gcp/docs/models/textchunk.md new file mode 100644 index 00000000..b266619d --- /dev/null +++ b/packages/gcp/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinkchunk.md b/packages/gcp/docs/models/thinkchunk.md similarity index 91% rename from packages/mistralai_gcp/docs/models/thinkchunk.md rename to packages/gcp/docs/models/thinkchunk.md index 66b2e0cd..b07f598e 100644 --- a/packages/mistralai_gcp/docs/models/thinkchunk.md +++ b/packages/gcp/docs/models/thinkchunk.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | | `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | -| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | -| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinking.md b/packages/gcp/docs/models/thinking.md similarity index 100% rename from packages/mistralai_gcp/docs/models/thinking.md rename to packages/gcp/docs/models/thinking.md diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/gcp/docs/models/tool.md similarity index 100% rename from packages/mistralai_gcp/docs/models/tool.md rename to packages/gcp/docs/models/tool.md diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/gcp/docs/models/toolcall.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolcall.md rename to packages/gcp/docs/models/toolcall.md diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/gcp/docs/models/toolchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolchoice.md rename to packages/gcp/docs/models/toolchoice.md diff --git a/packages/mistralai_gcp/docs/models/toolchoiceenum.md b/packages/gcp/docs/models/toolchoiceenum.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolchoiceenum.md rename to packages/gcp/docs/models/toolchoiceenum.md diff --git a/packages/mistralai_azure/docs/models/toolmessage.md b/packages/gcp/docs/models/toolmessage.md similarity index 92% rename from packages/mistralai_azure/docs/models/toolmessage.md rename to packages/gcp/docs/models/toolmessage.md index a54f4933..7201481e 100644 --- a/packages/mistralai_azure/docs/models/toolmessage.md +++ b/packages/gcp/docs/models/toolmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | | `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | | `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagecontent.md b/packages/gcp/docs/models/toolmessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolmessagecontent.md rename to packages/gcp/docs/models/toolmessagecontent.md diff --git 
a/packages/mistralai_gcp/docs/models/tooltypes.md b/packages/gcp/docs/models/tooltypes.md similarity index 100% rename from packages/mistralai_gcp/docs/models/tooltypes.md rename to packages/gcp/docs/models/tooltypes.md diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/gcp/docs/models/usageinfo.md similarity index 100% rename from packages/mistralai_gcp/docs/models/usageinfo.md rename to packages/gcp/docs/models/usageinfo.md diff --git a/packages/mistralai_azure/docs/models/usermessage.md b/packages/gcp/docs/models/usermessage.md similarity index 89% rename from packages/mistralai_azure/docs/models/usermessage.md rename to packages/gcp/docs/models/usermessage.md index 63b01310..e7a932ed 100644 --- a/packages/mistralai_azure/docs/models/usermessage.md +++ b/packages/gcp/docs/models/usermessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagecontent.md b/packages/gcp/docs/models/usermessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/usermessagecontent.md rename to packages/gcp/docs/models/usermessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/utils/retryconfig.md b/packages/gcp/docs/models/utils/retryconfig.md similarity index 100% rename from packages/mistralai_gcp/docs/models/utils/retryconfig.md rename to packages/gcp/docs/models/utils/retryconfig.md diff --git a/packages/mistralai_gcp/docs/models/validationerror.md b/packages/gcp/docs/models/validationerror.md similarity index 100% rename from packages/mistralai_gcp/docs/models/validationerror.md rename to packages/gcp/docs/models/validationerror.md diff --git a/packages/mistralai_gcp/docs/sdks/chat/README.md b/packages/gcp/docs/sdks/chat/README.md similarity index 96% rename from packages/mistralai_gcp/docs/sdks/chat/README.md rename to packages/gcp/docs/sdks/chat/README.md index 6f5f1977..a1fdfd9a 100644 --- a/packages/mistralai_gcp/docs/sdks/chat/README.md +++ b/packages/gcp/docs/sdks/chat/README.md @@ -8,7 +8,7 @@ Chat Completion API. ### Available Operations * [stream](#stream) - Stream chat completion -* [create](#create) - Chat Completion +* [complete](#complete) - Chat Completion ## stream @@ -17,18 +17,21 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.stream(messages=[ { - "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, -], model="mistral-small-latest") +], model="mistral-small-2503") if res is not None: for event in res: @@ -65,29 +68,32 @@ if res is not None: | --------------- | ----------- | ------------ | | models.SDKError | 4xx-5xx | */* | -## create +## complete Chat Completion ### Example Usage ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, -], model="mistral-small-latest") +], model="mistral-small-2503") if res is not None: # handle response - pass + print(res.choices[0].message.content) ``` diff --git a/packages/mistralai_gcp/docs/sdks/fim/README.md b/packages/gcp/docs/sdks/fim/README.md similarity index 94% rename from packages/mistralai_gcp/docs/sdks/fim/README.md rename to packages/gcp/docs/sdks/fim/README.md index b997fabf..61a28883 100644 --- a/packages/mistralai_gcp/docs/sdks/fim/README.md +++ b/packages/gcp/docs/sdks/fim/README.md @@ -8,7 +8,7 @@ Fill-in-the-middle API. ### Available Operations * [stream](#stream) - Stream fim completion -* [create](#create) - Fim Completion +* [complete](#complete) - Fim Completion ## stream @@ -17,13 +17,16 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) - -res = s.fim.stream(prompt="def", model="codestral-2405", suffix="return a+b") +res = s.fim.stream(prompt="def", model="codestral-2", suffix="return a+b") if res is not None: for event in res: @@ -58,24 +61,27 @@ if res is not None: | --------------- | ----------- | ------------ | | models.SDKError | 4xx-5xx | */* | -## create +## complete FIM completion. 
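Both README examples above iterate the stream with a bare `for event in res:`. A hedged sketch of what the loop body typically looks like, pieced together from the `CompletionEvent`, `CompletionChunk`, `CompletionResponseStreamChoice`, and `DeltaMessage` field tables earlier in this diff; the exact attribute chain is an assumption based on those tables, not code from this patch:

```python
def print_stream(res) -> None:
    # res is the iterator returned by s.chat.stream(...) in the example above.
    for event in res:                       # CompletionEvent
        for choice in event.data.choices:   # CompletionChunk -> CompletionResponseStreamChoice
            content = choice.delta.content  # DeltaMessage.content: str, chunk list, or None
            if isinstance(content, str):
                print(content, end="", flush=True)
    print()
```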
### Example Usage ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) -res = s.fim.complete(prompt="def", model="codestral-2405", suffix="return a+b") +res = s.fim.complete(prompt="def", model="codestral-2", suffix="return a+b") if res is not None: # handle response - pass + print(res.choices[0].message.content) ``` diff --git a/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md b/packages/gcp/docs/sdks/mistralgcp/README.md similarity index 100% rename from packages/mistralai_gcp/docs/sdks/mistralgcp/README.md rename to packages/gcp/docs/sdks/mistralgcp/README.md diff --git a/packages/mistralai_gcp/py.typed b/packages/gcp/py.typed similarity index 100% rename from packages/mistralai_gcp/py.typed rename to packages/gcp/py.typed diff --git a/packages/mistralai_gcp/pylintrc b/packages/gcp/pylintrc similarity index 100% rename from packages/mistralai_gcp/pylintrc rename to packages/gcp/pylintrc diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/gcp/pyproject.toml similarity index 78% rename from packages/mistralai_gcp/pyproject.toml rename to packages/gcp/pyproject.toml index df3e43ae..98619ecd 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/gcp/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-gcp" -version = "1.8.0" +version = "2.0.0a4" description = "Python Client SDK for the Mistral AI API in GCP." authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -9,7 +9,7 @@ dependencies = [ "eval-type-backport >=0.2.0", "google-auth (>=2.31.0,<3.0.0)", "httpx >=0.28.1", - "pydantic >=2.10.3", + "pydantic >=2.11.2", "python-dateutil >=2.8.2", "requests (>=2.32.3,<3.0.0)", "typing-inspection >=0.4.0", @@ -17,7 +17,7 @@ dependencies = [ [dependency-groups] dev = [ - "mypy==1.14.1", + "mypy==1.15.0", "pylint==3.2.3", "pyright>=1.1.401,<2", "pytest>=8.2.2,<9", @@ -26,20 +26,20 @@ dev = [ ] [tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai_gcp/py.typed"] +"*" = ["py.typed", "src/mistralai/gcp/client/py.typed"] [tool.hatch.build.targets.sdist] -include = ["src/mistralai_gcp"] +include = ["src/mistralai"] [tool.hatch.build.targets.sdist.force-include] "py.typed" = "py.typed" -"src/mistralai_gcp/py.typed" = "src/mistralai_gcp/py.typed" +"src/mistralai/gcp/client/py.typed" = "src/mistralai/gcp/client/py.typed" [tool.hatch.build.targets.wheel] -include = ["src/mistralai_gcp"] +include = ["src/mistralai"] [tool.hatch.build.targets.wheel.sources] -"src/mistralai_gcp" = "mistralai_gcp" +"src" = "" [virtualenvs] in-project = true @@ -53,6 +53,9 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" diff --git a/packages/mistralai_azure/scripts/prepare_readme.py b/packages/gcp/scripts/prepare_readme.py similarity index 96% rename from packages/mistralai_azure/scripts/prepare_readme.py rename to packages/gcp/scripts/prepare_readme.py index ff1121fd..ae27b555 100644 --- a/packages/mistralai_azure/scripts/prepare_readme.py +++ b/packages/gcp/scripts/prepare_readme.py @@ -10,7 +10,7 @@ GITHUB_URL = ( GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL ) - REPO_SUBDIR = "packages/mistralai_azure" + 
REPO_SUBDIR = "packages/gcp" # Ensure the subdirectory has a trailing slash if not REPO_SUBDIR.endswith("/"): REPO_SUBDIR += "/" diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/gcp/scripts/publish.sh similarity index 100% rename from packages/mistralai_gcp/scripts/publish.sh rename to packages/gcp/scripts/publish.sh diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/gcp/src/mistralai/gcp/client/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/__init__.py rename to packages/gcp/src/mistralai/gcp/client/__init__.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py b/packages/gcp/src/mistralai/gcp/client/_hooks/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py rename to packages/gcp/src/mistralai/gcp/client/_hooks/__init__.py diff --git a/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py b/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py new file mode 100644 index 00000000..23d3283d --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py @@ -0,0 +1,67 @@ +import json +import logging +from .types import BeforeRequestHook, BeforeRequestContext, Hooks +import httpx + +logger = logging.getLogger(__name__) + + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. + + +class GCPVertexAIPathHook(BeforeRequestHook): + """Build full Vertex AI URL path from project_id, region, and model. + + Extracts model from request body and builds the Vertex AI URL dynamically. + """ + + def __init__(self, project_id: str, region: str): + self.project_id = project_id + self.region = region + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + if not request.content: + return request + + try: + body = json.loads(request.content.decode("utf-8")) + except (UnicodeDecodeError, json.JSONDecodeError): + # Non-JSON body (e.g. multipart upload) — pass through unmodified + return request + + model = body.get("model") + if not model: + logger.warning( + "GCPVertexAIPathHook: request body has no 'model' field; " + "Vertex AI path will not be constructed. " + "Operation: %s", + hook_ctx.operation_id, + ) + return request + + is_streaming = "stream" in hook_ctx.operation_id.lower() + specifier = "streamRawPredict" if is_streaming else "rawPredict" + + path = ( + f"/v1/projects/{self.project_id}/locations/{self.region}/" + f"publishers/mistralai/models/{model}:{specifier}" + ) + + return httpx.Request( + method=request.method, + url=request.url.copy_with(path=path), + headers=request.headers, + content=request.content, + ) + + +def init_hooks(_hooks: Hooks) -> None: + """Initialize hooks. Called by SDKHooks.__init__. + + Note: GCPVertexAIPathHook requires project_id and region, so it is + registered separately in MistralGCP.__init__ after those values are known. 
+ """ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py b/packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py similarity index 97% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py rename to packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py index b81c2a27..2af4deed 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py @@ -13,7 +13,7 @@ ) from .registration import init_hooks from typing import List, Optional, Tuple -from mistralai_gcp.httpclient import HttpClient +from mistralai.gcp.client.httpclient import HttpClient class SDKHooks(Hooks): diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/gcp/src/mistralai/gcp/client/_hooks/types.py similarity index 96% rename from packages/mistralai_azure/src/mistralai_azure/_hooks/types.py rename to packages/gcp/src/mistralai/gcp/client/_hooks/types.py index 0c22d7eb..ea95bed2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/types.py @@ -2,8 +2,8 @@ from abc import ABC, abstractmethod import httpx -from mistralai_azure.httpclient import HttpClient -from mistralai_azure.sdkconfiguration import SDKConfiguration +from mistralai.gcp.client.httpclient import HttpClient +from mistralai.gcp.client.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py similarity index 76% rename from packages/mistralai_gcp/src/mistralai_gcp/_version.py rename to packages/gcp/src/mistralai/gcp/client/_version.py index a170f0ab..ba48dac1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/gcp/src/mistralai/gcp/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.8.0" +__version__: str = "2.0.0a4" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.8.0 2.794.1 1.0.0 mistralai-gcp" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a4 2.794.1 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/gcp/src/mistralai/gcp/client/basesdk.py similarity index 98% rename from packages/mistralai_gcp/src/mistralai_gcp/basesdk.py rename to packages/gcp/src/mistralai/gcp/client/basesdk.py index 7a93de23..6f9f5fd9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/gcp/src/mistralai/gcp/client/basesdk.py @@ -2,13 +2,17 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import ( +from mistralai.gcp.client import models, utils +from mistralai.gcp.client._hooks import ( AfterErrorContext, AfterSuccessContext, BeforeRequestContext, ) -from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content +from mistralai.gcp.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, +) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/gcp/src/mistralai/gcp/client/chat.py similarity index 96% rename from packages/mistralai_gcp/src/mistralai_gcp/chat.py rename to 
packages/gcp/src/mistralai/gcp/client/chat.py index 3dd6040f..78541248 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/gcp/src/mistralai/gcp/client/chat.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET -from mistralai_gcp.utils import eventstreaming -from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response +from mistralai.gcp.client import models, utils +from mistralai.gcp.client._hooks import HookContext +from mistralai.gcp.client.types import OptionalNullable, UNSET +from mistralai.gcp.client.utils import eventstreaming +from mistralai.gcp.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Dict, List, Mapping, Optional, Union @@ -16,12 +16,20 @@ def stream( self, *, model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ @@ -95,7 +103,9 @@ def stream( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -183,12 +193,20 @@ async def stream_async( self, *, model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ @@ -262,7 +280,9 @@ async def stream_async( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -351,8 +371,8 @@ def complete( *, model: str, messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -436,7 
+456,7 @@ def complete( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] + messages, List[models.ChatCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -518,8 +538,8 @@ async def complete_async( *, model: str, messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -603,7 +623,7 @@ async def complete_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] + messages, List[models.ChatCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/gcp/src/mistralai/gcp/client/fim.py similarity index 98% rename from packages/mistralai_gcp/src/mistralai_gcp/fim.py rename to packages/gcp/src/mistralai/gcp/client/fim.py index 36d9fd60..e2acacd5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/gcp/src/mistralai/gcp/client/fim.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET -from mistralai_gcp.utils import eventstreaming -from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response +from mistralai.gcp.client import models, utils +from mistralai.gcp.client._hooks import HookContext +from mistralai.gcp.client.types import OptionalNullable, UNSET +from mistralai.gcp.client.utils import eventstreaming +from mistralai.gcp.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Dict, Mapping, Optional, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/gcp/src/mistralai/gcp/client/httpclient.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/httpclient.py rename to packages/gcp/src/mistralai/gcp/client/httpclient.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/gcp/src/mistralai/gcp/client/models/__init__.py similarity index 85% rename from packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py rename to packages/gcp/src/mistralai/gcp/client/models/__init__.py index fe85b133..fb446c25 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/gcp/src/mistralai/gcp/client/models/__init__.py @@ -1,6 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .mistralgcperror import MistralGcpError +from .mistralgcperror import MistralGCPError from typing import TYPE_CHECKING from importlib import import_module import builtins @@ -11,7 +11,6 @@ AssistantMessage, AssistantMessageContent, AssistantMessageContentTypedDict, - AssistantMessageRole, AssistantMessageTypedDict, ) from .chatcompletionchoice import ( @@ -21,8 +20,8 @@ ) from .chatcompletionrequest import ( ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, @@ -35,26 +34,26 @@ ) from .chatcompletionstreamrequest import ( ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, ) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict from .completionresponsestreamchoice import ( CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, - FinishReason, ) from .contentchunk import ContentChunk, ContentChunkTypedDict from .deltamessage import ( - Content, - ContentTypedDict, DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, DeltaMessageTypedDict, ) from .fimcompletionrequest import ( @@ -82,30 +81,25 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagedetail import ImageDetail from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode from .no_response_error import NoResponseError from .prediction import Prediction, PredictionTypedDict - from .referencechunk import ( - ReferenceChunk, - ReferenceChunkType, - ReferenceChunkTypedDict, - ) + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .responsevalidationerror import ResponseValidationError from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( - Role, SystemMessage, SystemMessageContent, SystemMessageContentTypedDict, @@ -115,14 +109,8 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) - from .textchunk import TextChunk, TextChunkTypedDict, Type - from .thinkchunk import ( - ThinkChunk, - ThinkChunkType, - ThinkChunkTypedDict, - Thinking, - ThinkingTypedDict, - ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict @@ -131,7 +119,6 @@ ToolMessage, ToolMessageContent, ToolMessageContentTypedDict, - 
ToolMessageRole, ToolMessageTypedDict, ) from .tooltypes import ToolTypes @@ -140,7 +127,6 @@ UserMessage, UserMessageContent, UserMessageContentTypedDict, - UserMessageRole, UserMessageTypedDict, ) from .validationerror import ( @@ -156,14 +142,13 @@ "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentTypedDict", - "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", - "ChatCompletionRequestMessages", - "ChatCompletionRequestMessagesTypedDict", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", @@ -172,6 +157,10 @@ "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", @@ -180,12 +169,13 @@ "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", - "Content", "ContentChunk", "ContentChunkTypedDict", - "ContentTypedDict", "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", @@ -197,7 +187,6 @@ "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", - "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", @@ -206,37 +195,31 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "ImageDetail", "ImageURL", "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", "JSONSchema", "JSONSchemaTypedDict", "Loc", "LocTypedDict", - "Messages", - "MessagesTypedDict", - "MistralGcpError", + "MistralGCPError", "MistralPromptMode", "NoResponseError", "Prediction", "PredictionTypedDict", "ReferenceChunk", - "ReferenceChunkType", "ReferenceChunkTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "ResponseValidationError", - "Role", "SDKError", "Security", "SecurityTypedDict", - "Stop", - "StopTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -246,7 +229,6 @@ "TextChunk", "TextChunkTypedDict", "ThinkChunk", - "ThinkChunkType", "ThinkChunkTypedDict", "Thinking", "ThinkingTypedDict", @@ -259,17 +241,14 @@ "ToolMessage", "ToolMessageContent", "ToolMessageContentTypedDict", - "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", - "Type", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", - "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", @@ -279,14 +258,13 @@ "AssistantMessage": ".assistantmessage", "AssistantMessageContent": ".assistantmessage", "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", "ChatCompletionChoice": ".chatcompletionchoice", "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", 
"ChatCompletionChoiceTypedDict": ".chatcompletionchoice", "ChatCompletionRequest": ".chatcompletionrequest", - "ChatCompletionRequestMessages": ".chatcompletionrequest", - "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", "ChatCompletionRequestStop": ".chatcompletionrequest", "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", "ChatCompletionRequestToolChoice": ".chatcompletionrequest", @@ -295,25 +273,25 @@ "ChatCompletionResponse": ".chatcompletionresponse", "ChatCompletionResponseTypedDict": ".chatcompletionresponse", "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", - "Messages": ".chatcompletionstreamrequest", - "MessagesTypedDict": ".chatcompletionstreamrequest", - "Stop": ".chatcompletionstreamrequest", - "StopTypedDict": ".chatcompletionstreamrequest", "CompletionChunk": ".completionchunk", "CompletionChunkTypedDict": ".completionchunk", "CompletionEvent": ".completionevent", "CompletionEventTypedDict": ".completionevent", "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", - "FinishReason": ".completionresponsestreamchoice", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", "DeltaMessageTypedDict": ".deltamessage", "FIMCompletionRequest": ".fimcompletionrequest", "FIMCompletionRequestStop": ".fimcompletionrequest", @@ -335,13 +313,13 @@ "FunctionNameTypedDict": ".functionname", "HTTPValidationError": ".httpvalidationerror", "HTTPValidationErrorData": ".httpvalidationerror", + "ImageDetail": ".imagedetail", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", @@ -349,7 +327,6 @@ "Prediction": ".prediction", "PredictionTypedDict": ".prediction", "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", @@ -358,7 +335,6 @@ "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", - "Role": ".systemmessage", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": 
".systemmessage", @@ -367,9 +343,7 @@ "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", "TextChunkTypedDict": ".textchunk", - "Type": ".textchunk", "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", "ThinkChunkTypedDict": ".thinkchunk", "Thinking": ".thinkchunk", "ThinkingTypedDict": ".thinkchunk", @@ -383,7 +357,6 @@ "ToolMessage": ".toolmessage", "ToolMessageContent": ".toolmessage", "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", "ToolMessageTypedDict": ".toolmessage", "ToolTypes": ".tooltypes", "UsageInfo": ".usageinfo", @@ -391,7 +364,6 @@ "UserMessage": ".usermessage", "UserMessageContent": ".usermessage", "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", "UserMessageTypedDict": ".usermessage", "Loc": ".validationerror", "LocTypedDict": ".validationerror", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py similarity index 81% rename from packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py rename to packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py index 7790eb10..7061775b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py @@ -3,16 +3,19 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.gcp.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict AssistantMessageContentTypedDict = TypeAliasType( @@ -25,18 +28,22 @@ ) -AssistantMessageRole = Literal["assistant",] - - class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): + ROLE: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @@ -44,11 +51,9 @@ class AssistantMessage(BaseModel): prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: Optional[AssistantMessageRole] = "assistant" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] + optional_fields = ["role", "content", "tool_calls", "prefix"] nullable_fields = ["content", "tool_calls"] null_default_fields = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py similarity index 91% rename from packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py rename to packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py index fe3ee952..ae5a2fbf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py @@ -2,7 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_gcp.types import BaseModel, UnrecognizedStr +from mistralai.gcp.client.types import BaseModel, UnrecognizedStr from typing import Literal, Union from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py similarity index 97% rename from packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py rename to packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py index 80345f9d..1bc03922 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py @@ -11,14 +11,14 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai.gcp.client.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionRequestMessagesTypedDict", +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -ChatCompletionRequestMessages = Annotated[ +ChatCompletionRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -74,7 +74,7 @@ class ChatCompletionRequestTypedDict(TypedDict): model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionRequestMessagesTypedDict] + messages: List[ChatCompletionRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -113,7 +113,7 @@ class ChatCompletionRequest(BaseModel): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionRequestMessages] + messages: List[ChatCompletionRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: OptionalNullable[float] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py rename to packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py index a7953eb1..317c4d84 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py @@ -3,7 +3,7 @@ from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py similarity index 94% rename from packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py rename to packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py index e857d515..0a5a0021 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py @@ -11,29 +11,33 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai.gcp.client.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) 
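The `Stop`/`Messages` aliases handled below are part of the breaking renames to fully qualified names. A minimal migration sketch, assuming the 2.x package layout introduced by this patch re-exports the aliases from `mistralai.gcp.client.models` as the remapping table earlier in this patch suggests:

```python
# Migration sketch for the alias renames (import paths assumed from the new
# package layout in this patch; not guaranteed by the diff itself).
# 1.x imports that no longer resolve:
#   from mistralai_gcp.models import ChatCompletionRequestMessages, Messages, Stop
# 2.x equivalents, one prefixed alias per request model:
from mistralai.gcp.client.models import (
    ChatCompletionRequestMessage,        # was ChatCompletionRequestMessages
    ChatCompletionStreamRequestMessage,  # was Messages
    ChatCompletionStreamRequestStop,     # was Stop
)

def normalize_stop(stop: ChatCompletionStreamRequestStop) -> list[str]:
    # The alias is a Union[str, List[str]] type alias per its definition below,
    # so it is used in annotations rather than instantiated.
    return [stop] if isinstance(stop, str) else stop
```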
+ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -Stop = TypeAliasType("Stop", Union[str, List[str]]) +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -43,7 +47,7 @@ ) -Messages = Annotated[ +ChatCompletionStreamRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -70,7 +74,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[MessagesTypedDict] + messages: List[ChatCompletionStreamRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -79,7 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" stream: NotRequired[bool] - stop: NotRequired[StopTypedDict] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" @@ -108,7 +112,7 @@ class ChatCompletionStreamRequest(BaseModel): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[Messages] + messages: List[ChatCompletionStreamRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: OptionalNullable[float] = UNSET @@ -122,7 +126,7 @@ class ChatCompletionStreamRequest(BaseModel): stream: Optional[bool] = True - stop: Optional[Stop] = None + stop: Optional[ChatCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" random_seed: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py similarity index 94% rename from packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py rename to packages/gcp/src/mistralai/gcp/client/models/completionchunk.py index d6cc2a86..9e54cb6d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py @@ -6,7 +6,7 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_azure.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import List, Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/gcp/src/mistralai/gcp/client/models/completionevent.py similarity index 88% rename from packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py rename to packages/gcp/src/mistralai/gcp/client/models/completionevent.py index 33278c11..bb155009 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionevent.py @@ -2,7 +2,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py similarity index 82% rename from packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py rename to packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py index 0e890aac..6f306721 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py @@ -2,13 +2,18 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + UNSET_SENTINEL, + UnrecognizedStr, +) from pydantic import model_serializer from typing import Literal, Union from typing_extensions import TypedDict -FinishReason = Union[ +CompletionResponseStreamChoiceFinishReason = Union[ Literal[ "stop", "length", @@ -22,7 +27,7 @@ class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict - finish_reason: Nullable[FinishReason] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] class CompletionResponseStreamChoice(BaseModel): @@ -30,7 +35,7 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[FinishReason] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py similarity index 93% rename from 
packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py rename to packages/gcp/src/mistralai/gcp/client/models/contentchunk.py index da5671e3..1cd9e502 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py @@ -4,7 +4,7 @@ from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_gcp.utils import get_discriminator +from mistralai.gcp.client.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union from typing_extensions import Annotated, TypeAliasType diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py similarity index 81% rename from packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py rename to packages/gcp/src/mistralai/gcp/client/models/deltamessage.py index 7fa3c3f2..96923518 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py @@ -3,7 +3,7 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, @@ -15,24 +15,26 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] ) -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) class DeltaMessageTypedDict(TypedDict): role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: OptionalNullable[str] = UNSET - content: OptionalNullable[Content] = UNSET + content: OptionalNullable[DeltaMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py similarity index 99% rename from packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py rename to packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py index bcc97c90..f37bbcc3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py rename to packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py index e1940b0a..5b80da3f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py @@ -3,7 +3,7 @@ from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py similarity index 99% rename from packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py rename to packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py index 34d2ba65..8e610261 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/gcp/src/mistralai/gcp/client/models/function.py similarity index 91% rename from packages/mistralai_azure/src/mistralai_azure/models/function.py rename to packages/gcp/src/mistralai/gcp/client/models/function.py index a4642f92..28577eff 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/gcp/src/mistralai/gcp/client/models/function.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/gcp/src/mistralai/gcp/client/models/functioncall.py similarity index 91% rename from packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py rename to packages/gcp/src/mistralai/gcp/client/models/functioncall.py index 99554c88..0f1b2425 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py +++ b/packages/gcp/src/mistralai/gcp/client/models/functioncall.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Any, Dict, Union from typing_extensions import TypeAliasType, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py b/packages/gcp/src/mistralai/gcp/client/models/functionname.py similarity index 89% rename from packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py rename to packages/gcp/src/mistralai/gcp/client/models/functionname.py index 00ec22f5..585b9e39 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py +++ b/packages/gcp/src/mistralai/gcp/client/models/functionname.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py similarity index 82% rename from packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py rename to packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py index 79609351..57df7260 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py @@ -4,8 +4,8 @@ from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai_gcp.models import MistralGcpError -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.models import MistralGCPError +from mistralai.gcp.client.types import BaseModel from typing import List, Optional @@ -14,7 +14,7 @@ class HTTPValidationErrorData(BaseModel): @dataclass(unsafe_hash=True) -class HTTPValidationError(MistralGcpError): +class HTTPValidationError(MistralGCPError): data: HTTPValidationErrorData = field(hash=False) def __init__( diff --git a/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py b/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py new file mode 100644 index 00000000..68ed7608 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py similarity index 88% rename from packages/mistralai_azure/src/mistralai_azure/models/imageurl.py rename to packages/gcp/src/mistralai/gcp/client/models/imageurl.py index a5a66360..d4f298f1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from .imagedetail import ImageDetail +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, @@ -14,13 +15,13 @@ class ImageURLTypedDict(TypedDict): url: str - detail: NotRequired[Nullable[str]] + detail: NotRequired[Nullable[ImageDetail]] class ImageURL(BaseModel): url: str - detail: OptionalNullable[str] = UNSET + detail: OptionalNullable[ImageDetail] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py new file mode 100644 index 00000000..fc5284c1 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + TYPE: Annotated[ + Annotated[Literal["image_url"], AfterValidator(validate_const("image_url"))], + pydantic.Field(alias="type"), + ] = "image_url" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py similarity index 97% rename from packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py rename to packages/gcp/src/mistralai/gcp/client/models/jsonschema.py index 26914b2f..443c429d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py b/packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py similarity index 96% rename from packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py rename to packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py index fec729a5..9de91bf2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py @@ -6,7 +6,7 @@ @dataclass(unsafe_hash=True) -class MistralGcpError(Exception): +class MistralGCPError(Exception): """The base class for all HTTP error responses.""" message: str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py b/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py similarity index 89% rename from packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py rename to packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py index a5cc534f..c765e4f1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py +++ b/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import UnrecognizedStr +from mistralai.gcp.client.types import UnrecognizedStr from typing import Literal, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py b/packages/gcp/src/mistralai/gcp/client/models/no_response_error.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py rename to packages/gcp/src/mistralai/gcp/client/models/no_response_error.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/gcp/src/mistralai/gcp/client/models/prediction.py similarity index 89% rename from packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py rename to packages/gcp/src/mistralai/gcp/client/models/prediction.py index 36c87ab0..f53579ed 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py +++ b/packages/gcp/src/mistralai/gcp/client/models/prediction.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_const +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const import pydantic from pydantic.functional_validators import AfterValidator from typing import Literal, Optional diff --git a/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py new file mode 100644 index 00000000..274ea7f7 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + TYPE: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py similarity index 98% rename from packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py rename to packages/gcp/src/mistralai/gcp/client/models/responseformat.py index 9fe5116c..34ae6b03 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py @@ -3,7 +3,7 @@ from __future__ import annotations from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/gcp/src/mistralai/gcp/client/models/responseformats.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py rename to packages/gcp/src/mistralai/gcp/client/models/responseformats.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py b/packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py similarity index 86% rename from packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py rename to packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py index ebd4f214..0e86ea6c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py @@ -4,11 +4,11 @@ from typing import Optional from dataclasses import dataclass -from mistralai_gcp.models import MistralGcpError +from mistralai.gcp.client.models import MistralGCPError @dataclass(unsafe_hash=True) -class ResponseValidationError(MistralGcpError): +class ResponseValidationError(MistralGCPError): """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" def __init__( diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/gcp/src/mistralai/gcp/client/models/sdkerror.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py rename to packages/gcp/src/mistralai/gcp/client/models/sdkerror.py index 7f53bbcd..00bc1d99 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/sdkerror.py @@ -4,13 +4,13 @@ from typing import Optional from dataclasses import dataclass -from mistralai_gcp.models import MistralGcpError +from mistralai.gcp.client.models import MistralGCPError MAX_MESSAGE_LEN = 10_000 @dataclass(unsafe_hash=True) -class SDKError(MistralGcpError): +class SDKError(MistralGCPError): """The fallback error class if no more specific error class is 
matched.""" def __init__( diff --git a/packages/mistralai_azure/src/mistralai_azure/models/security.py b/packages/gcp/src/mistralai/gcp/client/models/security.py similarity index 81% rename from packages/mistralai_azure/src/mistralai_azure/models/security.py rename to packages/gcp/src/mistralai/gcp/client/models/security.py index c1ae8313..10a469b5 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/security.py +++ b/packages/gcp/src/mistralai/gcp/client/models/security.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import FieldMetadata, SecurityMetadata +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import FieldMetadata, SecurityMetadata from typing_extensions import Annotated, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py similarity index 57% rename from packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py rename to packages/gcp/src/mistralai/gcp/client/models/systemmessage.py index f99bf4ff..a7d695a7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py @@ -5,9 +5,12 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) -from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( @@ -21,15 +24,15 @@ ) -Role = Literal["system",] - - class SystemMessageTypedDict(TypedDict): content: SystemMessageContentTypedDict - role: NotRequired[Role] + role: Literal["system"] class SystemMessage(BaseModel): content: SystemMessageContent - role: Optional[Role] = "system" + ROLE: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py similarity index 66% rename from packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py rename to packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py index 4615a16c..225f38b7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py @@ -3,8 +3,7 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai_azure.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType @@ -16,6 +15,5 @@ SystemMessageContentChunks = Annotated[ - Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], - Discriminator(lambda m: 
get_discriminator(m, "type", "type")), + Union[TextChunk, ThinkChunk], Field(discriminator="TYPE") ] diff --git a/packages/gcp/src/mistralai/gcp/client/models/textchunk.py b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py new file mode 100644 index 00000000..77576c9f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + TYPE: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py similarity index 65% rename from packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py rename to packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py index f53a9f1a..b65fffb2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py @@ -3,9 +3,12 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_azure.types import BaseModel +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ThinkingTypedDict = TypeAliasType( @@ -16,20 +19,20 @@ Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) -ThinkChunkType = Literal["thinking",] - - class ThinkChunkTypedDict(TypedDict): thinking: List[ThinkingTypedDict] + type: Literal["thinking"] closed: NotRequired[bool] r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] class ThinkChunk(BaseModel): thinking: List[Thinking] + TYPE: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" - - type: Optional[ThinkChunkType] = "thinking" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/gcp/src/mistralai/gcp/client/models/tool.py similarity index 90% rename from packages/mistralai_gcp/src/mistralai_gcp/models/tool.py rename to packages/gcp/src/mistralai/gcp/client/models/tool.py index 800de633..d09c6854 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py +++ b/packages/gcp/src/mistralai/gcp/client/models/tool.py @@ -3,7 +3,7 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py similarity index 92% rename from packages/mistralai_azure/src/mistralai_azure/models/toolcall.py rename to packages/gcp/src/mistralai/gcp/client/models/toolcall.py index 44fe8ec8..a1edf337 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py @@ -3,7 +3,7 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py similarity index 94% rename from packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py rename to packages/gcp/src/mistralai/gcp/client/models/toolchoice.py index 4a148330..de3828da 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py @@ -3,7 +3,7 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py rename to packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py similarity index 77% rename from packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py rename to packages/gcp/src/mistralai/gcp/client/models/toolmessage.py index d6aa2621..65b1d9d6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py @@ -2,16 +2,19 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.gcp.client.utils import validate_const +import pydantic from pydantic import model_serializer -from 
typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolMessageContentTypedDict = TypeAliasType( @@ -22,28 +25,28 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool",] - - class ToolMessageTypedDict(TypedDict): content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] + ROLE: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + tool_call_id: OptionalNullable[str] = UNSET name: OptionalNullable[str] = UNSET - role: Optional[ToolMessageRole] = "tool" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] + optional_fields = ["tool_call_id", "name"] nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py b/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py similarity index 78% rename from packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py rename to packages/gcp/src/mistralai/gcp/client/models/tooltypes.py index 638890c5..fd1aa13d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py +++ b/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import UnrecognizedStr +from mistralai.gcp.client.types import UnrecognizedStr from typing import Literal, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py similarity index 98% rename from packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py rename to packages/gcp/src/mistralai/gcp/client/models/usageinfo.py index 59f36158..9b7207b1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py similarity index 73% rename from packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py rename to packages/gcp/src/mistralai/gcp/client/models/usermessage.py index 0168b452..c083e16d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py @@ -2,10 +2,13 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.gcp.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict UserMessageContentTypedDict = TypeAliasType( @@ -16,22 +19,22 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user",] - - class UserMessageTypedDict(TypedDict): content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] + role: Literal["user"] class UserMessage(BaseModel): content: Nullable[UserMessageContent] - role: Optional[UserMessageRole] = "user" + ROLE: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role"] + optional_fields = [] nullable_fields = ["content"] null_default_fields = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/gcp/src/mistralai/gcp/client/models/validationerror.py similarity index 90% rename from packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py rename to packages/gcp/src/mistralai/gcp/client/models/validationerror.py index 033d4b63..2d330e9a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/validationerror.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import List, Union from typing_extensions import TypeAliasType, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/py.typed b/packages/gcp/src/mistralai/gcp/client/py.typed similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/py.typed rename to packages/gcp/src/mistralai/gcp/client/py.typed diff --git a/packages/gcp/src/mistralai/gcp/client/sdk.py b/packages/gcp/src/mistralai/gcp/client/sdk.py new file mode 100644 index 00000000..e6e83839 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/sdk.py @@ -0,0 +1,243 @@ +"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, get_default_logger +from .utils.retries import RetryConfig +import google.auth +import google.auth.credentials +import google.auth.transport.requests +import httpx +import importlib +from mistralai.gcp.client import models, utils +from mistralai.gcp.client._hooks import SDKHooks +from mistralai.gcp.client._hooks.registration import GCPVertexAIPathHook +from mistralai.gcp.client.types import OptionalNullable, UNSET +import sys +from typing import Callable, Dict, Optional, TYPE_CHECKING, cast +import weakref + +if TYPE_CHECKING: + from mistralai.gcp.client.chat import Chat + from mistralai.gcp.client.fim import Fim + + +class MistralGCP(BaseSDK): + r"""Mistral AI API: Dora OpenAPI schema + + Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + """ + + chat: "Chat" + r"""Chat Completion API.""" + fim: "Fim" + r"""Fill-in-the-middle API.""" + _sub_sdk_map = { + "chat": ("mistralai.gcp.client.chat", "Chat"), + "fim": ("mistralai.gcp.client.fim", "Fim"), + } + + def __init__( + self, + project_id: Optional[str] = None, + region: str = "europe-west4", + access_token: Optional[str] = None, + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters.
+ + :param project_id: GCP project ID (auto-detected from credentials if not provided) + :param region: GCP region for Vertex AI (default: europe-west4) + :param access_token: Fixed access token for testing (skips google.auth) + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + """ + credentials: Optional[google.auth.credentials.Credentials] = None + if access_token is None: + creds, detected_project_id = google.auth.default( + scopes=["https://www.googleapis.com/auth/cloud-platform"], + ) + if creds is None: + raise ValueError("Failed to obtain GCP credentials") + # Cast to Credentials base class which has refresh() and token + creds = cast(google.auth.credentials.Credentials, creds) + creds.refresh(google.auth.transport.requests.Request()) + credentials = creds + project_id = project_id or detected_project_id + + if project_id is None: + raise ValueError( + "project_id must be provided or available from default credentials" + ) + + self._credentials = credentials + self._project_id = project_id + self._region = region + self._fixed_access_token = access_token + + def get_auth_token() -> str: + if self._fixed_access_token: + return self._fixed_access_token + creds = self._credentials + if creds is None: + raise ValueError("No credentials available") + # Only refresh when the token is expired or missing. + # This avoids a blocking HTTP round-trip on every request and + # minimises event-loop blocking when called from async paths + # (the Speakeasy-generated basesdk always calls security + # callables synchronously). + if not creds.valid: + creds.refresh(google.auth.transport.requests.Request()) + token = creds.token + if token is None: + raise ValueError("Failed to obtain access token") + return token + + if server_url is None: + server_url = f"https://{region}-aiplatform.googleapis.com" + + client_supplied = True + if client is None: + client = httpx.Client(follow_redirects=True) + client_supplied = False + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + async_client_supplied = True + if async_client is None: + async_client = httpx.AsyncClient(follow_redirects=True) + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol."
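For orientation, a usage sketch of the constructor above follows. The top-level import path and the `chat.complete()` call are assumptions carried over from the 1.x GCP SDK rather than facts shown in this patch, and the project and model values are placeholders:

```python
# Usage sketch (assumptions: MistralGCP is re-exported from
# mistralai.gcp.client, and the lazily loaded Chat sub-SDK keeps the
# complete() method from mistralai-gcp 1.x; values below are placeholders).
from mistralai.gcp.client import MistralGCP

# The context manager form relies on the __enter__/__exit__ pair defined
# further down, which closes the default httpx clients on exit.
with MistralGCP(
    project_id="my-gcp-project",  # optional: auto-detected from credentials
    region="europe-west4",        # default region per the signature above
) as sdk:
    res = sdk.chat.complete(
        model="mistral-large-2411",
        messages=[{"role": "user", "content": "Hello from Vertex AI"}],
    )
    print(res.choices[0].message.content)
```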
+ + def get_security() -> models.Security: + return models.Security(api_key=get_auth_token()) + + security: Callable[[], models.Security] = get_security + + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + client_supplied=client_supplied, + async_client=async_client, + async_client_supplied=async_client_supplied, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + parent_ref=self, + ) + + hooks = SDKHooks() + self.sdk_configuration.__dict__["_hooks"] = hooks + + # Register hook that builds Vertex AI URL path + hooks.register_before_request_hook(GCPVertexAIPathHook(project_id, region)) + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + + def dynamic_import(self, modname, retries=3): + last_exc: Optional[Exception] = None + for attempt in range(retries): + try: + return importlib.import_module(modname) + except (KeyError, ImportError, ModuleNotFoundError) as e: + last_exc = e + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise ImportError( + f"Failed to import module '{modname}' after {retries} attempts" + ) from last_exc + + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = self.dynamic_import(module_path) + klass = getattr(module, class_name) + instance = klass(self.sdk_configuration, parent_ref=self) + setattr(self, name, instance) + return instance + except ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, _exc_type, _exc_val, _exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, _exc_type, _exc_val, _exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/gcp/src/mistralai/gcp/client/sdkconfiguration.py similarity index 94% rename from packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py rename to packages/gcp/src/mistralai/gcp/client/sdkconfiguration.py index 
51289cf0..d56a634f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/gcp/src/mistralai/gcp/client/sdkconfiguration.py @@ -9,8 +9,8 @@ from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass -from mistralai_azure import models -from mistralai_azure.types import OptionalNullable, UNSET +from mistralai.gcp.client import models +from mistralai.gcp.client.types import OptionalNullable, UNSET from pydantic import Field from typing import Callable, Dict, Optional, Tuple, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py b/packages/gcp/src/mistralai/gcp/client/types/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py rename to packages/gcp/src/mistralai/gcp/client/types/__init__.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/gcp/src/mistralai/gcp/client/types/basemodel.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py rename to packages/gcp/src/mistralai/gcp/client/types/basemodel.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/gcp/src/mistralai/gcp/client/utils/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py rename to packages/gcp/src/mistralai/gcp/client/utils/__init__.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/gcp/src/mistralai/gcp/client/utils/annotations.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py rename to packages/gcp/src/mistralai/gcp/client/utils/annotations.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py b/packages/gcp/src/mistralai/gcp/client/utils/datetimes.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py rename to packages/gcp/src/mistralai/gcp/client/utils/datetimes.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/gcp/src/mistralai/gcp/client/utils/enums.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py rename to packages/gcp/src/mistralai/gcp/client/utils/enums.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py b/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py rename to packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/gcp/src/mistralai/gcp/client/utils/forms.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py rename to packages/gcp/src/mistralai/gcp/client/utils/forms.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py b/packages/gcp/src/mistralai/gcp/client/utils/headers.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py rename to packages/gcp/src/mistralai/gcp/client/utils/headers.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py b/packages/gcp/src/mistralai/gcp/client/utils/logger.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py rename to packages/gcp/src/mistralai/gcp/client/utils/logger.py diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py b/packages/gcp/src/mistralai/gcp/client/utils/metadata.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py rename to packages/gcp/src/mistralai/gcp/client/utils/metadata.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py b/packages/gcp/src/mistralai/gcp/client/utils/queryparams.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py rename to packages/gcp/src/mistralai/gcp/client/utils/queryparams.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py b/packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py rename to packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py b/packages/gcp/src/mistralai/gcp/client/utils/retries.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py rename to packages/gcp/src/mistralai/gcp/client/utils/retries.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py b/packages/gcp/src/mistralai/gcp/client/utils/security.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/security.py rename to packages/gcp/src/mistralai/gcp/client/utils/security.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/gcp/src/mistralai/gcp/client/utils/serializers.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py rename to packages/gcp/src/mistralai/gcp/client/utils/serializers.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py similarity index 95% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py rename to packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py index c168a293..83e8275e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py @@ -5,7 +5,7 @@ import httpx from .serializers import unmarshal_json -from mistralai_gcp import models +from mistralai.gcp.client import models T = TypeVar("T") diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py b/packages/gcp/src/mistralai/gcp/client/utils/url.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/url.py rename to packages/gcp/src/mistralai/gcp/client/utils/url.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/gcp/src/mistralai/gcp/client/utils/values.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/values.py rename to packages/gcp/src/mistralai/gcp/client/utils/values.py diff --git a/packages/mistralai_gcp/uv.lock b/packages/gcp/uv.lock similarity index 92% rename from packages/mistralai_gcp/uv.lock rename to packages/gcp/uv.lock index 4fbca724..a49757c9 100644 --- a/packages/mistralai_gcp/uv.lock +++ b/packages/gcp/uv.lock @@ -277,7 +277,7 @@ wheels = [ [[package]] name = "mistralai-gcp" -version = "1.8.0" +version = "2.0.0a4" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, @@ -304,7 +304,7 @@ requires-dist = [ { name = "eval-type-backport", specifier = ">=0.2.0" }, { name = "google-auth", specifier = ">=2.31.0,<3.0.0" }, { name = "httpx", specifier = ">=0.28.1" }, - { name = "pydantic", specifier = ">=2.10.3" }, + { name = "pydantic", specifier = ">=2.11.2" }, { name = "python-dateutil", specifier = ">=2.8.2" }, { name = "requests", specifier = ">=2.32.3,<3.0.0" }, { name = "typing-inspection", specifier = ">=0.4.0" }, @@ -312,7 +312,7 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ - { name = "mypy", specifier = "==1.14.1" }, + { name = "mypy", specifier = "==1.15.0" }, { name = "pylint", specifier = "==3.2.3" }, { name = "pyright", specifier = ">=1.1.401,<2" }, { name = "pytest", specifier = ">=8.2.2,<9" }, @@ -322,40 +322,40 @@ dev = [ [[package]] name = "mypy" -version = "1.14.1" +version = "1.15.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051, upload-time = "2024-12-30T16:39:07.335Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002, upload-time = "2024-12-30T16:37:22.435Z" }, - { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400, upload-time = "2024-12-30T16:37:53.526Z" }, - { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172, upload-time = "2024-12-30T16:37:50.332Z" }, - { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732, upload-time = "2024-12-30T16:37:29.96Z" }, - { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197, upload-time = "2024-12-30T16:38:05.037Z" }, - { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash 
= "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836, upload-time = "2024-12-30T16:37:19.726Z" }, - { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432, upload-time = "2024-12-30T16:37:11.533Z" }, - { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515, upload-time = "2024-12-30T16:37:40.724Z" }, - { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791, upload-time = "2024-12-30T16:36:58.73Z" }, - { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203, upload-time = "2024-12-30T16:37:03.741Z" }, - { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900, upload-time = "2024-12-30T16:37:57.948Z" }, - { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869, upload-time = "2024-12-30T16:37:33.428Z" }, - { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668, upload-time = "2024-12-30T16:38:02.211Z" }, - { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060, upload-time = "2024-12-30T16:37:46.131Z" }, - { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167, upload-time = "2024-12-30T16:37:43.534Z" }, - { url = 
"https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341, upload-time = "2024-12-30T16:37:36.249Z" }, - { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991, upload-time = "2024-12-30T16:37:06.743Z" }, - { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016, upload-time = "2024-12-30T16:37:15.02Z" }, - { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097, upload-time = "2024-12-30T16:37:25.144Z" }, - { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728, upload-time = "2024-12-30T16:38:08.634Z" }, - { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965, upload-time = "2024-12-30T16:38:12.132Z" }, - { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, - { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, - { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, - { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = 
"sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash 
= "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, ] [[package]] diff --git a/packages/mistralai_azure/.genignore b/packages/mistralai_azure/.genignore deleted file mode 100644 index ba7f2350..00000000 --- a/packages/mistralai_azure/.genignore +++ /dev/null @@ -1,5 +0,0 @@ -pyproject.toml -src/mistralai_azure/sdk.py -README.md -USAGE.md -docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_azure/.vscode/settings.json b/packages/mistralai_azure/.vscode/settings.json deleted file mode 100644 index 8d79f0ab..00000000 --- a/packages/mistralai_azure/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "python.testing.pytestArgs": ["tests", "-vv"], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "pylint.args": ["--rcfile=pylintrc"] -} diff --git a/packages/mistralai_azure/USAGE.md b/packages/mistralai_azure/USAGE.md deleted file mode 100644 index 
0ccf3d70..00000000 --- a/packages/mistralai_azure/USAGE.md +++ /dev/null @@ -1,55 +0,0 @@ - -### Create Chat Completions - -This example shows how to create chat completions. - -```python -# Synchronous Example -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - # handle response - pass -``` - -
- -The same SDK client can also be used to make asychronous requests by importing asyncio. -```python -# Asynchronous Example -import asyncio -from mistralai_azure import MistralAzure -import os - -async def main(): - s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") - ) - res = await s.chat.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], model="azureai") - if res is not None: - # handle response - pass - -asyncio.run(main()) -``` - \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagerole.md b/packages/mistralai_azure/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/packages/mistralai_azure/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md b/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md deleted file mode 100644 index c807dacd..00000000 --- a/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionResponseStreamChoice - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `index` | *int* | :heavy_check_mark: | N/A | -| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | -| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/deltamessage.md b/packages/mistralai_azure/docs/models/deltamessage.md deleted file mode 100644 index 61deabbf..00000000 --- a/packages/mistralai_azure/docs/models/deltamessage.md +++ /dev/null @@ -1,10 +0,0 @@ -# DeltaMessage - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/documenturlchunk.md b/packages/mistralai_azure/docs/models/documenturlchunk.md deleted file mode 100644 index 6c9a5b4d..00000000 --- a/packages/mistralai_azure/docs/models/documenturlchunk.md +++ /dev/null @@ -1,10 +0,0 @@ -# DocumentURLChunk - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `document_url` | *str* | 
:heavy_check_mark: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | -| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/documenturlchunktype.md b/packages/mistralai_azure/docs/models/documenturlchunktype.md deleted file mode 100644 index 32e1fa9e..00000000 --- a/packages/mistralai_azure/docs/models/documenturlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentURLChunkType - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurl.md b/packages/mistralai_azure/docs/models/imageurl.md deleted file mode 100644 index 7c2bcbc3..00000000 --- a/packages/mistralai_azure/docs/models/imageurl.md +++ /dev/null @@ -1,9 +0,0 @@ -# ImageURL - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `url` | *str* | :heavy_check_mark: | N/A | -| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunk.md b/packages/mistralai_azure/docs/models/imageurlchunk.md deleted file mode 100644 index f1b926ef..00000000 --- a/packages/mistralai_azure/docs/models/imageurlchunk.md +++ /dev/null @@ -1,11 +0,0 @@ -# ImageURLChunk - -{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunktype.md b/packages/mistralai_azure/docs/models/imageurlchunktype.md deleted file mode 100644 index 2064a0b4..00000000 --- a/packages/mistralai_azure/docs/models/imageurlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageURLChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/referencechunk.md b/packages/mistralai_azure/docs/models/referencechunk.md deleted file mode 100644 index a132ca2f..00000000 --- a/packages/mistralai_azure/docs/models/referencechunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# ReferenceChunk - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/packages/mistralai_azure/docs/models/referencechunktype.md b/packages/mistralai_azure/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- a/packages/mistralai_azure/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/role.md b/packages/mistralai_azure/docs/models/role.md deleted file mode 100644 index affca78d..00000000 --- a/packages/mistralai_azure/docs/models/role.md +++ /dev/null @@ -1,8 +0,0 @@ -# Role - - -## Values - -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/textchunk.md b/packages/mistralai_azure/docs/models/textchunk.md deleted file mode 100644 index 6daab3c3..00000000 --- a/packages/mistralai_azure/docs/models/textchunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# TextChunk - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/thinkchunktype.md b/packages/mistralai_azure/docs/models/thinkchunktype.md deleted file mode 100644 index baf6f755..00000000 --- a/packages/mistralai_azure/docs/models/thinkchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ThinkChunkType - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `THINKING` | thinking | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagerole.md b/packages/mistralai_azure/docs/models/toolmessagerole.md deleted file mode 100644 index c24e59c0..00000000 --- a/packages/mistralai_azure/docs/models/toolmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/type.md b/packages/mistralai_azure/docs/models/type.md deleted file mode 100644 index eb0581e7..00000000 --- a/packages/mistralai_azure/docs/models/type.md +++ /dev/null @@ -1,8 +0,0 @@ -# Type - - -## Values - -| Name | Value | -| ------ | ------ | -| `TEXT` | text | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagerole.md b/packages/mistralai_azure/docs/models/usermessagerole.md deleted file mode 100644 index 171124e4..00000000 --- a/packages/mistralai_azure/docs/models/usermessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# UserMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py deleted file mode 100644 index 77df6aef..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py +++ /dev/null @@ -1,22 +0,0 @@ -# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py -from typing import Union - -import httpx - -from .types import BeforeRequestContext, BeforeRequestHook - -PREFIX = "mistral-client-python/" - -class 
CustomUserAgentHook(BeforeRequestHook): - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - current = request.headers["user-agent"] - if current.startswith(PREFIX): - return request - - request.headers["user-agent"] = ( - PREFIX + current.split(" ")[1] - ) - - return request diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py deleted file mode 100644 index 304edfa2..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py +++ /dev/null @@ -1,15 +0,0 @@ -from .custom_user_agent import CustomUserAgentHook -from .types import Hooks - -# This file is only ever generated once on the first generation and then is free to be modified. -# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them -# in this file or in separate files in the hooks folder. - - -def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument - """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook - with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - hooks.register_before_request_hook(CustomUserAgentHook()) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py deleted file mode 100644 index a40e451c..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai_azure.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url",] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py deleted file mode 100644 index 32d2ca68..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference",] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py deleted file mode 100644 index 5845456e..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Type = Literal["text",] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[Type] - - -class TextChunk(BaseModel): - text: str - - type: Optional[Type] = "text" diff --git a/packages/mistralai_gcp/.genignore b/packages/mistralai_gcp/.genignore deleted file mode 100644 index 76043176..00000000 --- a/packages/mistralai_gcp/.genignore +++ /dev/null @@ -1,5 +0,0 @@ -pyproject.toml -src/mistralai_gcp/sdk.py -README.md -USAGE.md -docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_gcp/.vscode/settings.json b/packages/mistralai_gcp/.vscode/settings.json deleted file mode 100644 index 8d79f0ab..00000000 --- a/packages/mistralai_gcp/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "python.testing.pytestArgs": ["tests", "-vv"], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "pylint.args": ["--rcfile=pylintrc"] -} diff --git a/packages/mistralai_gcp/USAGE.md b/packages/mistralai_gcp/USAGE.md deleted file mode 100644 index 30fa08aa..00000000 --- a/packages/mistralai_gcp/USAGE.md +++ /dev/null @@ -1,51 +0,0 @@ - -### Create Chat Completions - -This example shows how to create chat completions. - -```python -# Synchronous Example -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="mistral-small-latest") - -if res is not None: - # handle response - pass -``` - -
- -The same SDK client can also be used to make asychronous requests by importing asyncio. -```python -# Asynchronous Example -import asyncio -from mistralai_gcp import MistralGCP -import os - -async def main(): - s = MistralGCP( - api_key=os.getenv("API_KEY", ""), - ) - res = await s.chat.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], model="mistral-small-latest") - if res is not None: - # handle response - pass - -asyncio.run(main()) -``` - \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagerole.md b/packages/mistralai_gcp/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/packages/mistralai_gcp/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md b/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md deleted file mode 100644 index c807dacd..00000000 --- a/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionResponseStreamChoice - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `index` | *int* | :heavy_check_mark: | N/A | -| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | -| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md b/packages/mistralai_gcp/docs/models/deltamessage.md deleted file mode 100644 index 61deabbf..00000000 --- a/packages/mistralai_gcp/docs/models/deltamessage.md +++ /dev/null @@ -1,10 +0,0 @@ -# DeltaMessage - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurl.md b/packages/mistralai_gcp/docs/models/imageurl.md deleted file mode 100644 index 7c2bcbc3..00000000 --- a/packages/mistralai_gcp/docs/models/imageurl.md +++ /dev/null @@ -1,9 +0,0 @@ -# ImageURL - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `url` | *str* | :heavy_check_mark: | N/A | -| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunk.md b/packages/mistralai_gcp/docs/models/imageurlchunk.md deleted file mode 100644 index f1b926ef..00000000 --- 
a/packages/mistralai_gcp/docs/models/imageurlchunk.md +++ /dev/null @@ -1,11 +0,0 @@ -# ImageURLChunk - -{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunktype.md b/packages/mistralai_gcp/docs/models/imageurlchunktype.md deleted file mode 100644 index 2064a0b4..00000000 --- a/packages/mistralai_gcp/docs/models/imageurlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageURLChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/referencechunk.md b/packages/mistralai_gcp/docs/models/referencechunk.md deleted file mode 100644 index a132ca2f..00000000 --- a/packages/mistralai_gcp/docs/models/referencechunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# ReferenceChunk - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/referencechunktype.md b/packages/mistralai_gcp/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- a/packages/mistralai_gcp/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/role.md b/packages/mistralai_gcp/docs/models/role.md deleted file mode 100644 index affca78d..00000000 --- a/packages/mistralai_gcp/docs/models/role.md +++ /dev/null @@ -1,8 +0,0 @@ -# Role - - -## Values - -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/textchunk.md b/packages/mistralai_gcp/docs/models/textchunk.md deleted file mode 100644 index 6daab3c3..00000000 --- a/packages/mistralai_gcp/docs/models/textchunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# TextChunk - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinkchunktype.md 
b/packages/mistralai_gcp/docs/models/thinkchunktype.md deleted file mode 100644 index baf6f755..00000000 --- a/packages/mistralai_gcp/docs/models/thinkchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ThinkChunkType - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `THINKING` | thinking | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagerole.md b/packages/mistralai_gcp/docs/models/toolmessagerole.md deleted file mode 100644 index c24e59c0..00000000 --- a/packages/mistralai_gcp/docs/models/toolmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/type.md b/packages/mistralai_gcp/docs/models/type.md deleted file mode 100644 index eb0581e7..00000000 --- a/packages/mistralai_gcp/docs/models/type.md +++ /dev/null @@ -1,8 +0,0 @@ -# Type - - -## Values - -| Name | Value | -| ------ | ------ | -| `TEXT` | text | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagerole.md b/packages/mistralai_gcp/docs/models/usermessagerole.md deleted file mode 100644 index 171124e4..00000000 --- a/packages/mistralai_gcp/docs/models/usermessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# UserMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py deleted file mode 100644 index 77df6aef..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py +++ /dev/null @@ -1,22 +0,0 @@ -# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py -from typing import Union - -import httpx - -from .types import BeforeRequestContext, BeforeRequestHook - -PREFIX = "mistral-client-python/" - -class CustomUserAgentHook(BeforeRequestHook): - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - current = request.headers["user-agent"] - if current.startswith(PREFIX): - return request - - request.headers["user-agent"] = ( - PREFIX + current.split(" ")[1] - ) - - return request diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py deleted file mode 100644 index ddb53f21..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai_gcp.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url",] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py deleted file mode 100644 index 904e8b82..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference",] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py deleted file mode 100644 index c4a8cf28..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Type = Literal["text",] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[Type] - - -class TextChunk(BaseModel): - text: str - - type: Optional[Type] = "text" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py deleted file mode 100644 index de48fbbb..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ /dev/null @@ -1,233 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import json -import weakref -from typing import Any, Optional, Union, cast - -import google.auth -import google.auth.credentials -import google.auth.transport -import google.auth.transport.requests -import httpx - -from mistralai_gcp import models -from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks -from mistralai_gcp.chat import Chat -from mistralai_gcp.fim import Fim -from mistralai_gcp.types import UNSET, OptionalNullable - -from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients -from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger -from .utils.retries import RetryConfig - -LEGACY_MODEL_ID_FORMAT = { - "codestral-2405": "codestral@2405", - "mistral-large-2407": "mistral-large@2407", - "mistral-nemo-2407": "mistral-nemo@2407", -} - - -def get_model_info(model: str) -> tuple[str, str]: - # if the model requiers the legacy fomat, use it, else do nothing. - if model in LEGACY_MODEL_ID_FORMAT: - return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] - return model, model - - -class MistralGoogleCloud(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - - chat: Chat - r"""Chat Completion API.""" - fim: Fim - r"""Fill-in-the-middle API.""" - - def __init__( - self, - region: str = "europe-west4", - project_id: Optional[str] = None, - access_token: Optional[str] = None, - client: Optional[HttpClient] = None, - async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None, - ) -> None: - r"""Instantiates the SDK configuring it with the provided parameters. 
- - :param api_key: The api_key required for authentication - :param server: The server by name to use for all methods - :param server_url: The server URL to use for all methods - :param url_params: Parameters to optionally template the server URL with - :param client: The HTTP client to use for all synchronous methods - :param async_client: The Async HTTP client to use for all asynchronous methods - :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds - """ - - credentials = None - if not access_token: - credentials, loaded_project_id = google.auth.default( - scopes=["https://www.googleapis.com/auth/cloud-platform"], - ) - - # default will already raise a google.auth.exceptions.DefaultCredentialsError if no credentials are found - assert isinstance( - credentials, google.auth.credentials.Credentials - ), "credentials must be an instance of google.auth.credentials.Credentials" - - credentials.refresh(google.auth.transport.requests.Request()) - project_id = project_id or loaded_project_id - - if project_id is None: - raise ValueError("project_id must be provided") - - def auth_token() -> str: - if access_token: - return access_token - - assert credentials is not None, "credentials must be initialized" - credentials.refresh(google.auth.transport.requests.Request()) - token = credentials.token - if not token: - raise Exception("Failed to get token from credentials") - return token - - client_supplied = True - if client is None: - client = httpx.Client() - client_supplied = False - - assert issubclass( - type(client), HttpClient - ), "The provided client must implement the HttpClient protocol." - - async_client_supplied = True - if async_client is None: - async_client = httpx.AsyncClient() - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() - - assert issubclass( - type(async_client), AsyncHttpClient - ), "The provided async_client must implement the AsyncHttpClient protocol." 
- - security: Any = None - if callable(auth_token): - security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment - api_key=auth_token() - ) - else: - security = models.Security(api_key=auth_token) - - BaseSDK.__init__( - self, - SDKConfiguration( - client=client, - client_supplied=client_supplied, - async_client=async_client, - async_client_supplied=async_client_supplied, - security=security, - server_url=f"https://{region}-aiplatform.googleapis.com", - server=None, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger, - ), - ) - - hooks = SDKHooks() - hook = GoogleCloudBeforeRequestHook(region, project_id) - hooks.register_before_request_hook(hook) - current_server_url, *_ = self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client - ) - if current_server_url != server_url: - self.sdk_configuration.server_url = server_url - - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - - self._init_sdks() - - def _init_sdks(self): - self.chat = Chat(self.sdk_configuration) - self.fim = Fim(self.sdk_configuration) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None - - -class GoogleCloudBeforeRequestHook(BeforeRequestHook): - def __init__(self, region: str, project_id: str): - self.region = region - self.project_id = project_id - - def before_request( - self, hook_ctx, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - # The goal of this function is to template in the region, project and model into the URL path - # We do this here so that the API remains more user-friendly - model_id = None - new_content = None - if request.content: - parsed = json.loads(request.content.decode("utf-8")) - model_raw = parsed.get("model") - model_name, model_id = get_model_info(model_raw) - parsed["model"] = model_name - new_content = json.dumps(parsed).encode("utf-8") - - if model_id == "": - raise ValueError("model must be provided") - - stream = "streamRawPredict" in request.url.path - specifier = "streamRawPredict" if stream else "rawPredict" - url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" - - headers = dict(request.headers) - # Delete content-length header as it will need to be recalculated - headers.pop("content-length", None) - - next_request = httpx.Request( - method=request.method, - url=request.url.copy_with(path=url), - headers=headers, - content=new_content, - stream=None, - ) - - return next_request diff --git a/pylintrc b/pylintrc index d4e4ba5e..2dc62b0e 100644 --- a/pylintrc +++ b/pylintrc @@ -103,7 +103,7 @@ source-roots=src # When enabled, pylint would attempt to guess 
common misconfiguration and emit # user-friendly hints instead of false-positive error messages. -suggestion-mode=yes +# Note: suggestion-mode was removed in pylint 3.0 # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. diff --git a/pyproject.toml b/pyproject.toml index f8006e7d..c1762f0a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0a3" +version = "2.0.0a4" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -8,7 +8,7 @@ readme = "README.md" dependencies = [ "eval-type-backport >=0.2.0", "httpx >=0.28.1", - "pydantic >=2.10.3", + "pydantic >=2.11.2", "python-dateutil >=2.8.2", "typing-inspection >=0.4.0", "pyyaml (>=6.0.2,<7.0.0)", @@ -65,11 +65,18 @@ default-groups = [ [tool.setuptools.package-data] "*" = ["py.typed", "src/mistralai/client/py.typed"] +[tool.hatch.build] +dev-mode-dirs = [ + "src", + "packages/azure/src", + "packages/gcp/src", +] + [tool.hatch.build.targets.sdist] include = [ "src/mistralai", - "packages/mistralai_azure/src/mistralai_azure", - "packages/mistralai_gcp/src/mistralai_gcp", + "packages/azure/src/mistralai", + "packages/gcp/src/mistralai", ] [tool.hatch.build.targets.sdist.force-include] @@ -79,27 +86,27 @@ include = [ [tool.hatch.build.targets.wheel] include = [ "src/mistralai", - "packages/mistralai_azure/src/mistralai_azure", - "packages/mistralai_gcp/src/mistralai_gcp", + "packages/azure/src/mistralai", + "packages/gcp/src/mistralai", ] [tool.hatch.build.targets.wheel.sources] "src/mistralai" = "mistralai" -"packages/mistralai_azure/src/mistralai_azure" = "mistralai_azure" -"packages/mistralai_gcp/src/mistralai_gcp" = "mistralai_gcp" +"packages/azure/src/mistralai/azure" = "mistralai/azure" +"packages/gcp/src/mistralai/gcp" = "mistralai/gcp" [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.pytest.ini_options] -pythonpath = ["src"] +pythonpath = ["src", "packages/azure/src", "packages/gcp/src"] [tool.mypy] disable_error_code = "misc" namespace_packages = true explicit_package_bases = true -mypy_path = "src" +mypy_path = "src:packages/azure/src:packages/gcp/src" [[tool.mypy.overrides]] module = "typing_inspect" @@ -112,7 +119,8 @@ module = [ "authlib.*", "websockets.*", "mcp.*", - "griffe.*" + "griffe.*", + "google.*" ] ignore_missing_imports = true diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index 57bab71a..4baa3d88 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -7,7 +7,19 @@ if [ -f src/mistralai/__init__.py ]; then echo "ERROR: PEP 420 violation - src/mistralai/__init__.py must not exist" ERRORS=1 else - echo "-> PEP 420 namespace OK" + echo "-> PEP 420 namespace OK (core)" +fi +if [ -f packages/azure/src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - packages/azure/src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (azure)" +fi +if [ -f packages/gcp/src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - packages/gcp/src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (gcp)" fi echo "Running mypy..." 
@@ -19,6 +31,16 @@ uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run mypy src/mistralai/client/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure hooks" +uv run mypy packages/azure/src/mistralai/azure/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure sdk" +uv run mypy packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run mypy packages/gcp/src/mistralai/gcp/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on gcp sdk" +uv run mypy packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 echo "-> running on scripts" uv run mypy scripts/ || ERRORS=1 @@ -29,6 +51,14 @@ echo "-> running on extra" uv run pyright src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run pyright src/mistralai/client/_hooks/ || ERRORS=1 +echo "-> running on azure hooks" +uv run pyright packages/azure/src/mistralai/azure/client/_hooks/ || ERRORS=1 +echo "-> running on azure sdk" +uv run pyright packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run pyright packages/gcp/src/mistralai/gcp/client/_hooks/ || ERRORS=1 +echo "-> running on gcp sdk" +uv run pyright packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 echo "-> running on scripts" uv run pyright scripts/ || ERRORS=1 @@ -40,6 +70,16 @@ uv run ruff check src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run ruff check src/mistralai/client/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure hooks" +uv run ruff check packages/azure/src/mistralai/azure/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure sdk" +uv run ruff check packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run ruff check packages/gcp/src/mistralai/gcp/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on gcp sdk" +uv run ruff check packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 echo "-> running on scripts" uv run ruff check scripts/ || ERRORS=1 diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 22fc94e5..998b8dbe 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -1,13 +1,18 @@ #!/bin/bash -# Default retry count +# Defaults RETRY_COUNT=3 +NO_EXTRA_DEP=false # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in + --no-extra-dep) + NO_EXTRA_DEP=true + shift + ;; --retry-count) - RETRY_COUNT="$1" + RETRY_COUNT="$2" shift 2 ;; --help) @@ -25,14 +30,15 @@ while [[ $# -gt 0 ]]; do esac done -# List of files to exclude +# List of files to always exclude exclude_files=( "examples/mistral/chat/chatbot_with_streaming.py" "examples/mistral/agents/async_conversation_run_mcp_remote_auth.py" - "examples/mistral/jobs/async_fine_tuning_chat.py" "examples/mistral/jobs/async_fine_tuning.py" + "examples/mistral/jobs/async_fine_tuning_chat.py" "examples/mistral/jobs/fine_tuning.py" "examples/mistral/jobs/fine_tuning_dry_run.py" + "examples/mistral/jobs/async_jobs_ocr_batch_annotation.py" "examples/mistral/classifier/async_classifier.py" "examples/mistral/mcp_servers/sse_server.py" "examples/mistral/mcp_servers/stdio_server.py" @@ -44,6 +50,21 @@ exclude_files=( 
"examples/mistral/audio/async_realtime_transcription_stream.py" ) +# Files that require extra dependencies (agents, mcp, audio, etc.) +extra_dep_files=( + "examples/mistral/agents/" + "examples/mistral/mcp_servers/" + "examples/mistral/audio/" +) + +if [ "$NO_EXTRA_DEP" = true ]; then + for pattern in "${extra_dep_files[@]}"; do + for f in ${pattern}*.py; do + [ -f "$f" ] && exclude_files+=("$f") + done + done +fi + failed=0 echo "Skipping scripts" diff --git a/tasks.py b/tasks.py index 0d5483e1..8b1bc3f0 100644 --- a/tasks.py +++ b/tasks.py @@ -19,8 +19,6 @@ def update_speakeasy( workflow_lock_path: str = WORKFLOW_LOCK_PATH, verbose: bool = False, ): - if not re.match(r'^\d+\.\d+\.\d+$', version): - raise ValueError(f"Invalid version format: {version}. Expected format: X.Y.Z (e.g., 1.2.3)") """ Update the speakeasy version and pin the openapi specs to the current revision. @@ -30,6 +28,8 @@ def update_speakeasy( inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" --verbose """ + if not re.match(r'^\d+\.\d+\.\d+$', version): + raise ValueError(f"Invalid version format: {version}. Expected format: X.Y.Z (e.g., 1.2.3)") for target in targets: try: SpeakeasyTargets(target) diff --git a/tests/test_azure_integration.py b/tests/test_azure_integration.py new file mode 100644 index 00000000..ac4e38a1 --- /dev/null +++ b/tests/test_azure_integration.py @@ -0,0 +1,433 @@ +""" +Integration tests for Azure SDK. + +These tests require credentials and make real API calls. +Skip if AZURE_API_KEY env var is not set. + +Prerequisites: + 1. Azure API key (stored in Bitwarden at "[MaaS] - Azure Foundry API key") + 2. Tailscale connected via gw-0 exit node + +Usage: + AZURE_API_KEY=xxx pytest tests/test_azure_integration.py -v + +Environment variables: + AZURE_API_KEY: API key (required) + AZURE_ENDPOINT: Base URL (default: https://maas-qa-aifoundry.services.ai.azure.com/models) + AZURE_MODEL: Model name (default: maas-qa-ministral-3b) + AZURE_API_VERSION: API version (default: 2024-05-01-preview) + +Note: AZURE_ENDPOINT should be the base URL without path suffixes. +The SDK appends /chat/completions to this URL. The api_version parameter +is automatically injected as a query parameter by the SDK. 
+ +Available models: + Chat: maas-qa-ministral-3b, maas-qa-mistral-large-3, maas-qa-mistral-medium-2505 + OCR: maas-qa-mistral-document-ai-2505, maas-qa-mistral-document-ai-2512 + (OCR uses a separate endpoint, not tested here) +""" +import json +import os + +import pytest + +# Configuration from env vars +AZURE_API_KEY = os.environ.get("AZURE_API_KEY") +AZURE_ENDPOINT = os.environ.get( + "AZURE_ENDPOINT", + "https://maas-qa-aifoundry.services.ai.azure.com/models", +) +AZURE_MODEL = os.environ.get("AZURE_MODEL", "maas-qa-ministral-3b") +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +SKIP_REASON = "AZURE_API_KEY env var required" + +pytestmark = pytest.mark.skipif( + not AZURE_API_KEY, + reason=SKIP_REASON +) + +# Shared tool definition for tool-call tests +WEATHER_TOOL = { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the weather in a city", + "parameters": { + "type": "object", + "properties": {"city": {"type": "string"}}, + "required": ["city"], + }, + }, +} + + +@pytest.fixture +def azure_client(): + """Create an Azure client with api_version parameter.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + return MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + + +class TestAzureChatComplete: + """Test synchronous chat completion.""" + + def test_basic_completion(self, azure_client): + """Test basic chat completion returns a response.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_system_message(self, azure_client): + """Test chat completion with system + user message.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "system", "content": "You are a pirate. 
Respond in pirate speak."}, + {"role": "user", "content": "Say hello."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_max_tokens(self, azure_client): + """Test chat completion respects max_tokens.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + def test_completion_with_temperature(self, azure_client): + """Test chat completion accepts temperature parameter.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'test'."} + ], + temperature=0.0, + ) + assert res is not None + assert res.choices[0].message.content is not None + + def test_completion_with_stop_sequence(self, azure_client): + """Test chat completion stops at stop sequence.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Write three sentences about the sky."} + ], + stop=["."], + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + # The model should stop at or before the first period + assert content.count(".") <= 1 + + def test_completion_with_random_seed(self, azure_client): + """Test chat completion with random_seed returns valid responses.""" + res1 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + res2 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + # Both should return valid responses (not asserting equality due to model non-determinism) + assert res1.choices[0].message.content is not None + assert res2.choices[0].message.content is not None + + def test_multi_turn_conversation(self, azure_client): + """Test multi-turn conversation with user/assistant round-trip.""" + res1 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."} + ], + ) + assert res1.choices[0].message.content is not None + + res2 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."}, + {"role": "assistant", "content": res1.choices[0].message.content}, + {"role": "user", "content": "What is my name?"}, + ], + ) + assert res2.choices[0].message.content is not None + assert "Alice" in res2.choices[0].message.content + + def test_tool_call(self, azure_client): + """Test that the model returns a tool call when given tools.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + tool_call = choice.message.tool_calls[0] + assert tool_call.function.name == "get_weather" + args = json.loads(tool_call.function.arguments) + assert "city" in args + + def test_json_response_format(self, azure_client): + """Test JSON response format returns valid JSON.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Return a JSON object with a key 'greeting' and value 'hello'."} + ], + 
response_format={"type": "json_object"}, + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + parsed = json.loads(content) + assert isinstance(parsed, dict) + + def test_completion_with_n(self, azure_client): + """Test completion with n=2 returns multiple choices.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say a random word."} + ], + n=2, + ) + assert res is not None + assert len(res.choices) == 2 + for choice in res.choices: + assert choice.message.content is not None + + +class TestAzureChatStream: + """Test streaming chat completion.""" + + def test_basic_stream(self, azure_client): + """Test streaming returns chunks with content.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + def test_stream_with_max_tokens(self, azure_client): + """Test streaming respects max_tokens truncation.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Find finish_reason in any chunk + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] in ("length", "stop") + + def test_stream_finish_reason(self, azure_client): + """Test that the last chunk has a finish_reason.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hi'."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # The final chunk(s) should contain a finish_reason + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] == "stop" + + def test_stream_tool_call(self, azure_client): + """Test tool call via streaming, collecting tool_call delta chunks.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Collect tool call information from delta chunks + tool_call_found = False + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.tool_calls: + tool_call_found = True + break + + assert tool_call_found, "Expected tool_call delta chunks in stream" + + +class TestAzureChatCompleteAsync: + """Test async chat completion.""" + + @pytest.mark.asyncio + async def test_basic_completion_async(self, azure_client): + """Test async chat completion returns a response.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_completion_with_system_message_async(self, azure_client): + 
"""Test async chat completion with system + user message.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Say 'hello'."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_tool_call_async(self, azure_client): + """Test async tool call returns tool_calls.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + assert choice.message.tool_calls[0].function.name == "get_weather" + + +class TestAzureChatStreamAsync: + """Test async streaming chat completion.""" + + @pytest.mark.asyncio + async def test_basic_stream_async(self, azure_client): + """Test async streaming returns chunks with content.""" + stream = await azure_client.chat.stream_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + content = "" + async for chunk in stream: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + +class TestAzureContextManager: + """Test context manager support.""" + + def test_sync_context_manager(self): + """Test that MistralAzure works as a sync context manager.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + with MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) as client: + res = client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_async_context_manager(self): + """Test that MistralAzure works as an async context manager.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + async with MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) as client: + res = await client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'async context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None diff --git a/tests/test_azure_v2_parity.py b/tests/test_azure_v2_parity.py new file mode 100644 index 00000000..8cd89bf4 --- /dev/null +++ b/tests/test_azure_v2_parity.py @@ -0,0 +1,269 @@ +""" +Parity tests for the Azure v2 SDK. + +Verifies that the regenerated mistralai.azure package exposes +the same public API surface as the v1 mistralai_azure package. +Uses introspection only — no API calls or credentials required. 
+""" +import inspect + +import pytest + +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.chat import Chat +from mistralai.azure.client.ocr import Ocr +from mistralai.azure.client.types import UNSET + +AZURE_METHODS: dict[str, set[str]] = { + "chat": {"complete", "stream"}, + "ocr": {"process"}, +} + +TESTED_METHODS: set[str] = set() + +_EMPTY = inspect.Parameter.empty + + +def mark_tested(resource: str, method: str) -> None: + TESTED_METHODS.add(f"{resource}.{method}") + + +# --------------------------------------------------------------------------- +# Expected parameter specs: (name, expected_default) +# Use _EMPTY for required params, UNSET for OptionalNullable, None for Optional +# --------------------------------------------------------------------------- + +CONSTRUCTOR_PARAMS = [ + ("api_key", _EMPTY), + ("server", None), + ("server_url", None), + ("url_params", None), + ("client", None), + ("async_client", None), + ("retry_config", UNSET), + ("timeout_ms", None), + ("debug_logger", None), + ("api_version", "2024-05-01-preview"), +] + +CHAT_COMPLETE_PARAMS = [ + ("messages", _EMPTY), + ("model", "azureai"), + ("temperature", UNSET), + ("top_p", None), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("response_format", None), + ("tools", UNSET), + ("tool_choice", None), + ("presence_penalty", None), + ("frequency_penalty", None), + ("n", UNSET), + ("prediction", None), + ("parallel_tool_calls", None), + ("prompt_mode", UNSET), + ("safe_prompt", None), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +CHAT_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in CHAT_COMPLETE_PARAMS +] + +OCR_PROCESS_PARAMS = [ + ("model", _EMPTY), + ("document", _EMPTY), + ("id", None), + ("pages", UNSET), + ("include_image_base64", UNSET), + ("image_limit", UNSET), + ("image_min_size", UNSET), + ("bbox_annotation_format", UNSET), + ("document_annotation_format", UNSET), + ("document_annotation_prompt", UNSET), + ("table_format", UNSET), + ("extract_header", None), + ("extract_footer", None), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestAzureSDKStructure: + def test_sdk_has_chat(self): + assert "chat" in MistralAzure.__annotations__ + + def test_sdk_has_ocr(self): + assert "ocr" in MistralAzure.__annotations__ + + @pytest.mark.parametrize("param_name,expected_default", CONSTRUCTOR_PARAMS) + def test_constructor_param(self, param_name, expected_default): + sig = inspect.signature(MistralAzure.__init__) + assert param_name in sig.parameters, f"Missing constructor param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Constructor param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + @pytest.mark.parametrize("method", ["__enter__", "__exit__", "__aenter__", "__aexit__"]) + def test_context_manager_support(self, method): + assert hasattr(MistralAzure, method), f"MistralAzure missing {method}" + + +class TestAzureChat: + def test_has_complete(self): + assert hasattr(Chat, "complete") + mark_tested("chat", "complete") + + def test_has_complete_async(self): + assert hasattr(Chat, "complete_async") + mark_tested("chat", 
"complete_async") + + def test_has_stream(self): + assert hasattr(Chat, "stream") + mark_tested("chat", "stream") + + def test_has_stream_async(self): + assert hasattr(Chat, "stream_async") + mark_tested("chat", "stream_async") + + # -- complete params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete) + assert param_name in sig.parameters, f"Chat.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream) + assert param_name in sig.parameters, f"Chat.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete_async) + assert param_name in sig.parameters, f"Chat.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream_async) + assert param_name in sig.parameters, f"Chat.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): + sync_params = set(inspect.signature(Chat.complete).parameters) - {"self"} + async_params = set(inspect.signature(Chat.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Chat.stream).parameters) - {"self"} + async_params = set(inspect.signature(Chat.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def test_complete_model_defaults_azureai(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["model"].default == "azureai" + + def test_stream_model_defaults_azureai(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["model"].default == "azureai" + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["stream"].default is True + + +class TestAzureOcr: + def test_has_process(self): + assert hasattr(Ocr, "process") + mark_tested("ocr", "process") + + def test_has_process_async(self): + assert hasattr(Ocr, "process_async") + mark_tested("ocr", "process_async") + + # -- process params -- + 
@pytest.mark.parametrize("param_name,expected_default", OCR_PROCESS_PARAMS) + def test_process_has_param(self, param_name, expected_default): + sig = inspect.signature(Ocr.process) + assert param_name in sig.parameters, f"Ocr.process missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Ocr.process param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- process_async matches process -- + @pytest.mark.parametrize("param_name,expected_default", OCR_PROCESS_PARAMS) + def test_process_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Ocr.process_async) + assert param_name in sig.parameters, f"Ocr.process_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Ocr.process_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_process_async_matches_process(self): + sync_params = set(inspect.signature(Ocr.process).parameters) - {"self"} + async_params = set(inspect.signature(Ocr.process_async).parameters) - {"self"} + assert sync_params == async_params + + +class TestAzureCoverage: + def test_all_methods_tested(self): + expected = set() + for resource, methods in AZURE_METHODS.items(): + for method in methods: + expected.add(f"{resource}.{method}") + expected.add(f"{resource}.{method}_async") + untested = expected - TESTED_METHODS + assert not untested, f"Untested methods: {untested}" + + def test_no_unexpected_public_methods_on_chat(self): + public = {m for m in dir(Chat) if not m.startswith("_") and callable(getattr(Chat, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Chat methods: {unexpected}" + + def test_no_unexpected_public_methods_on_ocr(self): + public = {m for m in dir(Ocr) if not m.startswith("_") and callable(getattr(Ocr, m, None))} + known = {"process", "process_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Ocr methods: {unexpected}" diff --git a/tests/test_gcp_integration.py b/tests/test_gcp_integration.py new file mode 100644 index 00000000..fe24b8b0 --- /dev/null +++ b/tests/test_gcp_integration.py @@ -0,0 +1,512 @@ +""" +Integration tests for GCP SDK. + +These tests require GCP credentials and make real API calls. +Skip if GCP_PROJECT_ID env var is not set. + +Prerequisites: + 1. Authenticate with GCP: gcloud auth application-default login + 2. Have "Vertex AI User" role on the project (e.g. model-garden-420509) + +The SDK automatically: + - Detects credentials via google.auth.default() + - Auto-refreshes tokens when they expire + - Builds the Vertex AI URL from project_id and region + +Available models: + - Chat: mistral-small-2503, mistral-large-2501, ... 
+ - FIM: codestral-2 + See: https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/mistral + +Usage: + GCP_PROJECT_ID=model-garden-420509 pytest tests/test_gcp_integration.py -v + +Environment variables: + GCP_PROJECT_ID: GCP project ID (required, or auto-detected from credentials) + GCP_REGION: Vertex AI region (default: us-central1) + GCP_MODEL: Model name for chat (default: mistral-small-2503) + GCP_FIM_MODEL: Model name for FIM (default: codestral-2) + +""" +import json +import os + +import pytest + +# Configuration from env vars +GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID") +GCP_REGION = os.environ.get("GCP_REGION", "us-central1") +GCP_MODEL = os.environ.get("GCP_MODEL", "mistral-small-2503") +GCP_FIM_MODEL = os.environ.get("GCP_FIM_MODEL", "codestral-2") + +SKIP_REASON = "GCP_PROJECT_ID env var required" + +pytestmark = pytest.mark.skipif( + not GCP_PROJECT_ID, + reason=SKIP_REASON +) + +# Shared tool definition for tool-call tests +WEATHER_TOOL = { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the weather in a city", + "parameters": { + "type": "object", + "properties": {"city": {"type": "string"}}, + "required": ["city"], + }, + }, +} + + +@pytest.fixture +def gcp_client(): + """Create a GCP client for chat tests. + + The SDK automatically: + - Detects credentials via google.auth.default() + - Auto-refreshes tokens when they expire + - Builds the Vertex AI URL from project_id and region + """ + from mistralai.gcp.client import MistralGCP + return MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) + + +class TestGCPChatComplete: + """Test synchronous chat completion.""" + + def test_basic_completion(self, gcp_client): + """Test basic chat completion returns a response.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_system_message(self, gcp_client): + """Test chat completion with system + user message.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "system", "content": "You are a pirate. 
Respond in pirate speak."}, + {"role": "user", "content": "Say hello."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_max_tokens(self, gcp_client): + """Test chat completion respects max_tokens.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + def test_completion_with_temperature(self, gcp_client): + """Test chat completion accepts temperature parameter.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'test'."} + ], + temperature=0.0, + ) + assert res is not None + assert res.choices[0].message.content is not None + + def test_completion_with_stop_sequence(self, gcp_client): + """Test chat completion stops at stop sequence.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Write three sentences about the sky."} + ], + stop=["."], + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + # The model should stop at or before the first period + assert content.count(".") <= 1 + + def test_completion_with_random_seed(self, gcp_client): + """Test chat completion with random_seed returns valid responses.""" + res1 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + res2 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + # Both should return valid responses (not asserting equality due to model non-determinism) + assert res1.choices[0].message.content is not None + assert res2.choices[0].message.content is not None + + def test_multi_turn_conversation(self, gcp_client): + """Test multi-turn conversation with user/assistant round-trip.""" + res1 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."} + ], + ) + assert res1.choices[0].message.content is not None + + res2 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."}, + {"role": "assistant", "content": res1.choices[0].message.content}, + {"role": "user", "content": "What is my name?"}, + ], + ) + assert res2.choices[0].message.content is not None + assert "Alice" in res2.choices[0].message.content + + def test_tool_call(self, gcp_client): + """Test that the model returns a tool call when given tools.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + tool_call = choice.message.tool_calls[0] + assert tool_call.function.name == "get_weather" + args = json.loads(tool_call.function.arguments) + assert "city" in args + + def test_json_response_format(self, gcp_client): + """Test JSON response format returns valid JSON.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Return a JSON object with a key 'greeting' and value 'hello'."} + ], + response_format={"type": "json_object"}, + ) + 
assert res is not None + content = res.choices[0].message.content + assert content is not None + parsed = json.loads(content) + assert isinstance(parsed, dict) + + +class TestGCPChatStream: + """Test streaming chat completion.""" + + def test_basic_stream(self, gcp_client): + """Test streaming returns chunks with content.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + def test_stream_with_max_tokens(self, gcp_client): + """Test streaming respects max_tokens truncation.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Find finish_reason in any chunk + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] in ("length", "stop") + + def test_stream_finish_reason(self, gcp_client): + """Test that the last chunk has a finish_reason.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hi'."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # The final chunk(s) should contain a finish_reason + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] == "stop" + + def test_stream_tool_call(self, gcp_client): + """Test tool call via streaming, collecting tool_call delta chunks.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Collect tool call information from delta chunks + tool_call_found = False + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.tool_calls: + tool_call_found = True + break + + assert tool_call_found, "Expected tool_call delta chunks in stream" + + +class TestGCPChatCompleteAsync: + """Test async chat completion.""" + + @pytest.mark.asyncio + async def test_basic_completion_async(self, gcp_client): + """Test async chat completion returns a response.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_completion_with_system_message_async(self, gcp_client): + """Test async chat completion with system + user message.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Say 'hello'."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_tool_call_async(self, gcp_client): + """Test async tool call returns tool_calls.""" + 
res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + assert choice.message.tool_calls[0].function.name == "get_weather" + + +class TestGCPChatStreamAsync: + """Test async streaming chat completion.""" + + @pytest.mark.asyncio + async def test_basic_stream_async(self, gcp_client): + """Test async streaming returns chunks with content.""" + stream = await gcp_client.chat.stream_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + content = "" + async for chunk in stream: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + +class TestGCPContextManager: + """Test context manager support.""" + + def test_sync_context_manager(self): + """Test that MistralGCP works as a sync context manager.""" + from mistralai.gcp.client import MistralGCP + with MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) as client: + res = client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_async_context_manager(self): + """Test that MistralGCP works as an async context manager.""" + from mistralai.gcp.client import MistralGCP + async with MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) as client: + res = await client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'async context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + +class TestGCPFIM: + """Test FIM (Fill-in-the-middle) completion.""" + + def _make_fim_client(self): + """Create a GCP client configured for FIM model.""" + from mistralai.gcp.client import MistralGCP + return MistralGCP(project_id=GCP_PROJECT_ID, region=GCP_REGION) + + def test_fim_complete(self): + """Test FIM completion returns a response.""" + client = self._make_fim_client() + res = client.fim.complete( + model=GCP_FIM_MODEL, + prompt="def fib():", + suffix=" return result", + timeout_ms=10000, + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + def test_fim_stream(self): + """Test FIM streaming returns chunks.""" + client = self._make_fim_client() + stream = client.fim.stream( + model=GCP_FIM_MODEL, + prompt="def hello():", + suffix=" return greeting", + timeout_ms=10000, + ) + chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + delta_content = chunk.data.choices[0].delta.content + if isinstance(delta_content, str): + content += delta_content + assert len(content) > 0 + + def test_fim_with_max_tokens(self): + """Test FIM completion with max_tokens.""" + client = self._make_fim_client() + res = client.fim.complete( + model=GCP_FIM_MODEL, + prompt="def add(a, b):", + suffix=" return result", + max_tokens=10, + timeout_ms=10000, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + @pytest.mark.asyncio + async def 
test_fim_complete_async(self): + """Test async FIM completion returns a response.""" + client = self._make_fim_client() + res = await client.fim.complete_async( + model=GCP_FIM_MODEL, + prompt="def fib():", + suffix=" return result", + timeout_ms=10000, + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_fim_stream_async(self): + """Test async FIM streaming returns chunks.""" + client = self._make_fim_client() + stream = await client.fim.stream_async( + model=GCP_FIM_MODEL, + prompt="def hello():", + suffix=" return greeting", + timeout_ms=10000, + ) + chunks = [] + async for chunk in stream: + chunks.append(chunk) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + delta_content = chunk.data.choices[0].delta.content + if isinstance(delta_content, str): + content += delta_content + assert len(content) > 0 diff --git a/tests/test_gcp_v2_parity.py b/tests/test_gcp_v2_parity.py new file mode 100644 index 00000000..0d6471e4 --- /dev/null +++ b/tests/test_gcp_v2_parity.py @@ -0,0 +1,330 @@ +""" +Parity tests for the GCP v2 SDK. + +Verifies that the regenerated mistralai.gcp package exposes +the same public API surface as the v1 mistralai_gcp package. +Uses introspection only — no API calls or credentials required. +""" +import inspect + +import pytest + +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.chat import Chat +from mistralai.gcp.client.fim import Fim +from mistralai.gcp.client.types import UNSET + +GCP_METHODS: dict[str, set[str]] = { + "chat": {"complete", "stream"}, + "fim": {"complete", "stream"}, +} + +TESTED_METHODS: set[str] = set() + +_EMPTY = inspect.Parameter.empty + + +def mark_tested(resource: str, method: str) -> None: + TESTED_METHODS.add(f"{resource}.{method}") + + +# --------------------------------------------------------------------------- +# Expected parameter specs: (name, expected_default) +# Use _EMPTY for required params, UNSET for OptionalNullable, None for Optional +# --------------------------------------------------------------------------- + +CONSTRUCTOR_PARAMS = [ + ("project_id", None), + ("region", "europe-west4"), + ("access_token", None), + ("server", None), + ("server_url", None), + ("url_params", None), + ("client", None), + ("async_client", None), + ("retry_config", UNSET), + ("timeout_ms", None), + ("debug_logger", None), +] + +CHAT_COMPLETE_PARAMS = [ + ("model", _EMPTY), + ("messages", _EMPTY), + ("temperature", UNSET), + ("top_p", None), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("response_format", None), + ("tools", UNSET), + ("tool_choice", None), + ("presence_penalty", None), + ("frequency_penalty", None), + ("n", UNSET), + ("prediction", None), + ("parallel_tool_calls", None), + ("prompt_mode", UNSET), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +CHAT_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in CHAT_COMPLETE_PARAMS +] + +FIM_COMPLETE_PARAMS = [ + ("model", _EMPTY), + ("prompt", _EMPTY), + ("temperature", UNSET), + ("top_p", 1), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("suffix", UNSET), + ("min_tokens", UNSET), + ("retries", UNSET), + ("server_url", None), + 
("timeout_ms", None), + ("http_headers", None), +] + +FIM_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in FIM_COMPLETE_PARAMS +] + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestGCPSDKStructure: + def test_sdk_has_chat(self): + assert "chat" in MistralGCP.__annotations__ + + def test_sdk_has_fim(self): + assert "fim" in MistralGCP.__annotations__ + + @pytest.mark.parametrize("param_name,expected_default", CONSTRUCTOR_PARAMS) + def test_constructor_param(self, param_name, expected_default): + sig = inspect.signature(MistralGCP.__init__) + assert param_name in sig.parameters, f"Missing constructor param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Constructor param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + @pytest.mark.parametrize("method", ["__enter__", "__exit__", "__aenter__", "__aexit__"]) + def test_context_manager_support(self, method): + assert hasattr(MistralGCP, method), f"MistralGCP missing {method}" + + +class TestGCPChat: + def test_has_complete(self): + assert hasattr(Chat, "complete") + mark_tested("chat", "complete") + + def test_has_complete_async(self): + assert hasattr(Chat, "complete_async") + mark_tested("chat", "complete_async") + + def test_has_stream(self): + assert hasattr(Chat, "stream") + mark_tested("chat", "stream") + + def test_has_stream_async(self): + assert hasattr(Chat, "stream_async") + mark_tested("chat", "stream_async") + + # -- complete params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete) + assert param_name in sig.parameters, f"Chat.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream) + assert param_name in sig.parameters, f"Chat.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete_async) + assert param_name in sig.parameters, f"Chat.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream_async) + assert param_name in sig.parameters, f"Chat.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream_async param {param_name}: expected 
{expected_default!r}, got {actual!r}"
+        )
+
+    # -- sync/async parity --
+    def test_complete_async_matches_complete(self):
+        sync_params = set(inspect.signature(Chat.complete).parameters) - {"self"}
+        async_params = set(inspect.signature(Chat.complete_async).parameters) - {"self"}
+        assert sync_params == async_params
+
+    def test_stream_async_matches_stream(self):
+        sync_params = set(inspect.signature(Chat.stream).parameters) - {"self"}
+        async_params = set(inspect.signature(Chat.stream_async).parameters) - {"self"}
+        assert sync_params == async_params
+
+    # -- key defaults --
+    def test_complete_model_required(self):
+        sig = inspect.signature(Chat.complete)
+        assert sig.parameters["model"].default is _EMPTY
+
+    def test_stream_model_required(self):
+        sig = inspect.signature(Chat.stream)
+        assert sig.parameters["model"].default is _EMPTY
+
+    def test_complete_stream_defaults_false(self):
+        sig = inspect.signature(Chat.complete)
+        assert sig.parameters["stream"].default is False
+
+    def test_stream_stream_defaults_true(self):
+        sig = inspect.signature(Chat.stream)
+        assert sig.parameters["stream"].default is True
+
+
+class TestGCPFim:
+    def test_has_complete(self):
+        assert hasattr(Fim, "complete")
+        mark_tested("fim", "complete")
+
+    def test_has_complete_async(self):
+        assert hasattr(Fim, "complete_async")
+        mark_tested("fim", "complete_async")
+
+    def test_has_stream(self):
+        assert hasattr(Fim, "stream")
+        mark_tested("fim", "stream")
+
+    def test_has_stream_async(self):
+        assert hasattr(Fim, "stream_async")
+        mark_tested("fim", "stream_async")
+
+    # -- complete params --
+    @pytest.mark.parametrize("param_name,expected_default", FIM_COMPLETE_PARAMS)
+    def test_complete_has_param(self, param_name, expected_default):
+        sig = inspect.signature(Fim.complete)
+        assert param_name in sig.parameters, f"Fim.complete missing param: {param_name}"
+        actual = sig.parameters[param_name].default
+        assert actual == expected_default, (
+            f"Fim.complete param {param_name}: expected {expected_default!r}, got {actual!r}"
+        )
+
+    # -- stream params --
+    @pytest.mark.parametrize("param_name,expected_default", FIM_STREAM_PARAMS)
+    def test_stream_has_param(self, param_name, expected_default):
+        sig = inspect.signature(Fim.stream)
+        assert param_name in sig.parameters, f"Fim.stream missing param: {param_name}"
+        actual = sig.parameters[param_name].default
+        assert actual == expected_default, (
+            f"Fim.stream param {param_name}: expected {expected_default!r}, got {actual!r}"
+        )
+
+    # -- complete_async matches complete --
+    @pytest.mark.parametrize("param_name,expected_default", FIM_COMPLETE_PARAMS)
+    def test_complete_async_has_param(self, param_name, expected_default):
+        sig = inspect.signature(Fim.complete_async)
+        assert param_name in sig.parameters, f"Fim.complete_async missing param: {param_name}"
+        actual = sig.parameters[param_name].default
+        assert actual == expected_default, (
+            f"Fim.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}"
+        )
+
+    # -- stream_async matches stream --
+    @pytest.mark.parametrize("param_name,expected_default", FIM_STREAM_PARAMS)
+    def test_stream_async_has_param(self, param_name, expected_default):
+        sig = inspect.signature(Fim.stream_async)
+        assert param_name in sig.parameters, f"Fim.stream_async missing param: {param_name}"
+        actual = sig.parameters[param_name].default
+        assert actual == expected_default, (
+            f"Fim.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}"
+        )
+
+    # -- sync/async parity --
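+    # These checks compare parameter-name sets rather than full Signature
+    # objects: a parameter added to or dropped from either variant fails
+    # here, while each parameter's default is already pinned by the
+    # parametrized tests above.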
+    def test_complete_async_matches_complete(self):
+        sync_params = set(inspect.signature(Fim.complete).parameters) - {"self"}
+        async_params = set(inspect.signature(Fim.complete_async).parameters) - {"self"}
+        assert sync_params == async_params
+
+    def test_stream_async_matches_stream(self):
+        sync_params = set(inspect.signature(Fim.stream).parameters) - {"self"}
+        async_params = set(inspect.signature(Fim.stream_async).parameters) - {"self"}
+        assert sync_params == async_params
+
+    # -- key defaults --
+    def test_complete_model_required(self):
+        sig = inspect.signature(Fim.complete)
+        assert sig.parameters["model"].default is _EMPTY
+
+    def test_stream_model_required(self):
+        sig = inspect.signature(Fim.stream)
+        assert sig.parameters["model"].default is _EMPTY
+
+    def test_complete_stream_defaults_false(self):
+        sig = inspect.signature(Fim.complete)
+        assert sig.parameters["stream"].default is False
+
+    def test_stream_stream_defaults_true(self):
+        sig = inspect.signature(Fim.stream)
+        assert sig.parameters["stream"].default is True
+
+    def test_complete_top_p_defaults_to_1(self):
+        sig = inspect.signature(Fim.complete)
+        assert sig.parameters["top_p"].default == 1
+
+    def test_stream_top_p_defaults_to_1(self):
+        sig = inspect.signature(Fim.stream)
+        assert sig.parameters["top_p"].default == 1
+
+
+class TestGCPCoverage:
+    # Relies on pytest's default in-file execution order: the structure
+    # tests above must run first so TESTED_METHODS is fully populated.
+    def test_all_methods_tested(self):
+        expected = set()
+        for resource, methods in GCP_METHODS.items():
+            for method in methods:
+                expected.add(f"{resource}.{method}")
+                expected.add(f"{resource}.{method}_async")
+        untested = expected - TESTED_METHODS
+        assert not untested, f"Untested methods: {untested}"
+
+    def test_no_unexpected_public_methods_on_chat(self):
+        public = {m for m in dir(Chat) if not m.startswith("_") and callable(getattr(Chat, m, None))}
+        known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"}
+        unexpected = public - known
+        assert not unexpected, f"Unexpected Chat methods: {unexpected}"
+
+    def test_no_unexpected_public_methods_on_fim(self):
+        public = {m for m in dir(Fim) if not m.startswith("_") and callable(getattr(Fim, m, None))}
+        known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"}
+        unexpected = public - known
+        assert not unexpected, f"Unexpected Fim methods: {unexpected}"
diff --git a/uv.lock b/uv.lock
index 1e667c77..8c689c4a 100644
--- a/uv.lock
+++ b/uv.lock
@@ -563,7 +563,7 @@ wheels = [
 [[package]]
 name = "mistralai"
-version = "2.0.0a2"
+version = "2.0.0a4"
 source = { editable = "."
} dependencies = [ { name = "eval-type-backport" }, @@ -626,7 +626,7 @@ requires-dist = [ { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.37.0,<2.0.0" }, { name = "opentelemetry-sdk", specifier = ">=1.33.1,<2.0.0" }, { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.61" }, - { name = "pydantic", specifier = ">=2.10.3" }, + { name = "pydantic", specifier = ">=2.11.2" }, { name = "python-dateutil", specifier = ">=2.8.2" }, { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, { name = "requests", marker = "extra == 'gcp'", specifier = ">=2.32.3" }, From 72242df801a66c287a408bf47baf03bf993a49f3 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Wed, 25 Feb 2026 18:27:20 +0100 Subject: [PATCH 28/42] feat: update Speakeasy gen.yaml configuration (#370) * chore: update gen.yaml with recommended v2 Speakeasy configs - Add input and dir to allowedRedefinedBuiltins - Add asyncPaginationSep2025 and conflictResistantModelImportsFeb2026 fixFlags - Set imports.paths.errors to "errors" - Increase maxMethodParams from 15 to 999 - Change multipartArrayFormat from legacy to standard * chore: sync pylintrc allowed-redefined-builtins with gen.yaml Add input and dir to match allowedRedefinedBuiltins in gen.yaml config. * chore: regenerate spec * fix: update code for renamed model classes - Filter UnknownAgentTool in agent update (context.py) - Update examples for renamed classes: - BatchRequest, UserMessage: import from models module - ClassifierTrainingParametersIn -> ClassifierTrainingParameters - ClassifierJobOut -> ClassifierFineTuningJob - Add type narrowing for ClassifierFineTuningJobDetails --- .speakeasy/gen.lock | 2879 ++++++++--------- .speakeasy/gen.yaml | 20 +- .speakeasy/workflow.lock | 18 +- .speakeasy/workflow.yaml | 2 +- Makefile | 12 +- README.md | 46 +- USAGE.md | 8 +- .../{models => errors}/httpvalidationerror.md | 0 docs/models/agent.md | 2 +- docs/models/agentconversation.md | 2 +- docs/models/agentconversationobject.md | 8 - docs/models/agentcreationrequest.md | 16 - docs/models/agenthandoffentry.md | 22 +- docs/models/agenthandoffentryobject.md | 8 - docs/models/agenthandoffentrytype.md | 8 - docs/models/agentobject.md | 8 - ...sapiv1agentscreateorupdatealiasrequest.md} | 2 +- ...=> agentsapiv1agentsdeletealiasrequest.md} | 2 +- ...t.md => agentsapiv1agentsdeleterequest.md} | 2 +- ...md => agentsapiv1agentsgetagentversion.md} | 2 +- docs/models/agentsapiv1agentsgetrequest.md | 9 + ... => agentsapiv1agentsgetversionrequest.md} | 2 +- ...est.md => agentsapiv1agentslistrequest.md} | 2 +- ...tsapiv1agentslistversionaliasesrequest.md} | 2 +- ...> agentsapiv1agentslistversionsrequest.md} | 2 +- docs/models/agentsapiv1agentsupdaterequest.md | 9 + ... agentsapiv1agentsupdateversionrequest.md} | 2 +- ... agentsapiv1conversationsappendrequest.md} | 2 +- ...sapiv1conversationsappendstreamrequest.md} | 2 +- ... agentsapiv1conversationsdeleterequest.md} | 2 +- ... 
=> agentsapiv1conversationsgetrequest.md} | 2 +- ...agentsapiv1conversationshistoryrequest.md} | 2 +- ...=> agentsapiv1conversationslistrequest.md} | 2 +- ...> agentsapiv1conversationslistresponse.md} | 2 +- ...gentsapiv1conversationsmessagesrequest.md} | 2 +- ...agentsapiv1conversationsrestartrequest.md} | 2 +- ...apiv1conversationsrestartstreamrequest.md} | 2 +- docs/models/agentscompletionrequest.md | 2 +- docs/models/agentscompletionstreamrequest.md | 2 +- ...eftmodelout.md => archivemodelresponse.md} | 2 +- docs/models/assistantmessage.md | 4 +- docs/models/assistantmessagerole.md | 8 - docs/models/audiochunk.md | 4 +- docs/models/audiotranscriptionrequest.md | 2 +- docs/models/{batchjobout.md => batchjob.md} | 2 +- docs/models/batchjobsout.md | 10 - docs/models/cancelfinetuningjobresponse.md | 19 - docs/models/chatclassificationrequest.md | 2 +- docs/models/chatcompletionrequest.md | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- .../{checkpointout.md => checkpoint.md} | 4 +- .../classifierdetailedjoboutintegration.md | 11 - docs/models/classifierfinetunedmodel.md | 23 + ...erjobout.md => classifierfinetuningjob.md} | 8 +- ...t.md => classifierfinetuningjobdetails.md} | 14 +- ...assifierfinetuningjobdetailsintegration.md | 11 + ...> classifierfinetuningjobdetailsstatus.md} | 2 +- .../classifierfinetuningjobintegration.md | 11 + ...us.md => classifierfinetuningjobstatus.md} | 2 +- docs/models/classifierftmodelout.md | 23 - docs/models/classifierjoboutintegration.md | 11 - ...ssifiertargetin.md => classifiertarget.md} | 2 +- ...targetout.md => classifiertargetresult.md} | 2 +- docs/models/classifiertrainingparametersin.md | 15 - docs/models/codeinterpretertool.md | 7 +- docs/models/completionargs.md | 24 +- .../completiondetailedjoboutintegration.md | 11 - .../completiondetailedjoboutrepository.md | 11 - docs/models/completionfinetunedmodel.md | 22 + ...onjobout.md => completionfinetuningjob.md} | 10 +- ...t.md => completionfinetuningjobdetails.md} | 14 +- ...mpletionfinetuningjobdetailsintegration.md | 11 + ...ompletionfinetuningjobdetailsrepository.md | 11 + ...> completionfinetuningjobdetailsstatus.md} | 2 +- .../completionfinetuningjobintegration.md | 11 + .../completionfinetuningjobrepository.md | 11 + ...us.md => completionfinetuningjobstatus.md} | 2 +- docs/models/completionftmodelout.md | 22 - docs/models/completionjoboutintegration.md | 11 - docs/models/completionjoboutrepository.md | 11 - docs/models/completiontrainingparametersin.md | 16 - docs/models/confirmation.md | 9 + docs/models/conversationappendrequest.md | 5 +- .../models/conversationappendstreamrequest.md | 5 +- docs/models/conversationhistory.md | 10 +- docs/models/conversationhistoryobject.md | 8 - docs/models/conversationmessages.md | 10 +- docs/models/conversationmessagesobject.md | 8 - docs/models/conversationresponse.md | 12 +- docs/models/conversationresponseobject.md | 8 - docs/models/conversationrestartrequest.md | 2 +- .../conversationrestartstreamrequest.md | 2 +- docs/models/conversationthinkchunk.md | 10 + docs/models/conversationthinkchunkthinking.md | 17 + ...updaterequest.md => createagentrequest.md} | 9 +- ...questtool.md => createagentrequesttool.md} | 2 +- ...batchjobin.md => createbatchjobrequest.md} | 4 +- ...uploadfileout.md => createfileresponse.md} | 2 +- ...jobin.md => createfinetuningjobrequest.md} | 8 +- ... 
createfinetuningjobrequestintegration.md} | 2 +- ...> createfinetuningjobrequestrepository.md} | 2 +- docs/models/createfinetuningjobresponse.md | 19 - .../{libraryin.md => createlibraryrequest.md} | 2 +- ...deletefileout.md => deletefileresponse.md} | 2 +- ...eletemodelv1modelsmodeliddeleterequest.md} | 2 +- docs/models/document.md | 45 +- docs/models/documentlibrarytool.md | 9 +- docs/models/documentout.md | 26 - docs/models/documentunion.md | 25 + docs/models/documentupdatein.md | 9 - docs/models/documenturlchunk.md | 10 +- docs/models/documenturlchunktype.md | 8 - docs/models/{eventout.md => event.md} | 2 +- ....md => filesapiroutesdeletefilerequest.md} | 2 +- ...d => filesapiroutesdownloadfilerequest.md} | 2 +- ...d => filesapiroutesgetsignedurlrequest.md} | 2 +- ...t.md => filesapirouteslistfilesrequest.md} | 2 +- ...d => filesapiroutesretrievefilerequest.md} | 2 +- ...esout.md => finetunedmodelcapabilities.md} | 2 +- docs/models/functioncallentry.md | 23 +- .../functioncallentryconfirmationstatus.md | 10 + docs/models/functioncallentryobject.md | 8 - docs/models/functioncallentrytype.md | 8 - docs/models/functioncallevent.md | 21 +- .../functioncalleventconfirmationstatus.md | 10 + docs/models/functionresultentry.md | 18 +- docs/models/functionresultentryobject.md | 8 - docs/models/functionresultentrytype.md | 8 - docs/models/getdocumenttextcontentrequest.md | 9 - ...{retrievefileout.md => getfileresponse.md} | 2 +- docs/models/getfinetuningjobresponse.md | 19 - ...lesignedurl.md => getsignedurlresponse.md} | 2 +- ...ubrepositoryout.md => githubrepository.md} | 2 +- docs/models/hyperparameters.md | 8 +- docs/models/imagedetail.md | 10 + docs/models/imagegenerationtool.md | 7 +- docs/models/imageurl.md | 8 +- docs/models/imageurlchunk.md | 8 +- docs/models/imageurlchunktype.md | 8 - docs/models/inputs.md | 4 +- docs/models/inputsmessage.md | 29 - docs/models/instructrequestinputs.md | 8 - .../{jobmetadataout.md => jobmetadata.md} | 2 +- ...obsapiroutesbatchcancelbatchjobrequest.md} | 2 +- ...> jobsapiroutesbatchgetbatchjobrequest.md} | 2 +- ... 
jobsapiroutesbatchgetbatchjobsrequest.md} | 2 +- ...finetuningarchivefinetunedmodelrequest.md} | 2 +- ...esfinetuningcancelfinetuningjobrequest.md} | 2 +- ...esfinetuningcancelfinetuningjobresponse.md | 19 + ...esfinetuningcreatefinetuningjobresponse.md | 19 + ...outesfinetuninggetfinetuningjobrequest.md} | 2 +- ...outesfinetuninggetfinetuningjobresponse.md | 19 + ...outesfinetuninggetfinetuningjobsrequest.md | 17 + ...outesfinetuninggetfinetuningjobsstatus.md} | 2 +- ...tesfinetuningstartfinetuningjobrequest.md} | 2 +- ...tesfinetuningstartfinetuningjobresponse.md | 19 + ...netuningunarchivefinetunedmodelrequest.md} | 2 +- ...esfinetuningupdatefinetunedmodelrequest.md | 9 + ...sfinetuningupdatefinetunedmodelresponse.md | 19 + docs/models/jobsout.md | 10 - docs/models/jobsoutdata.md | 17 - ...jobmetadataout.md => legacyjobmetadata.md} | 2 +- ...request.md => librariesdeletev1request.md} | 2 +- ...d => librariesdocumentsdeletev1request.md} | 2 +- ...mentsgetextractedtextsignedurlv1request.md | 9 + ...librariesdocumentsgetsignedurlv1request.md | 9 + ...> librariesdocumentsgetstatusv1request.md} | 2 +- ...rariesdocumentsgettextcontentv1request.md} | 2 +- ...t.md => librariesdocumentsgetv1request.md} | 2 +- ....md => librariesdocumentslistv1request.md} | 2 +- ...> librariesdocumentsreprocessv1request.md} | 2 +- .../librariesdocumentsupdatev1request.md | 10 + ...d => librariesdocumentsuploadv1request.md} | 2 +- ...aryrequest.md => librariesgetv1request.md} | 2 +- ...st.md => librariessharecreatev1request.md} | 2 +- ...st.md => librariessharedeletev1request.md} | 2 +- ...uest.md => librariessharelistv1request.md} | 2 +- docs/models/librariesupdatev1request.md | 9 + docs/models/{libraryout.md => library.md} | 2 +- docs/models/libraryinupdate.md | 9 - docs/models/listbatchjobsresponse.md | 10 + ...ocumentout.md => listdocumentsresponse.md} | 4 +- .../{listfilesout.md => listfilesresponse.md} | 2 +- docs/models/listfinetuningjobsrequest.md | 17 - docs/models/listfinetuningjobsresponse.md | 10 + docs/models/listfinetuningjobsresponsedata.md | 17 + docs/models/listlibrariesresponse.md | 8 + docs/models/listlibraryout.md | 8 - docs/models/messageinputcontentchunks.md | 4 +- docs/models/messageinputentry.md | 20 +- docs/models/messageinputentryobject.md | 8 - docs/models/messageinputentrytype.md | 8 - docs/models/messageoutputcontentchunks.md | 4 +- docs/models/messageoutputentry.md | 22 +- docs/models/messageoutputentryobject.md | 8 - docs/models/messageoutputentryrole.md | 8 - docs/models/messageoutputentrytype.md | 8 - docs/models/messageoutputevent.md | 22 +- docs/models/messageoutputeventrole.md | 8 - docs/models/{metricout.md => metric.md} | 2 +- docs/models/modelconversation.md | 26 +- docs/models/modelconversationobject.md | 8 - docs/models/ocrrequest.md | 30 +- docs/models/outputcontentchunks.md | 4 +- .../realtimetranscriptioninputaudioappend.md | 9 + .../realtimetranscriptioninputaudioend.md | 8 + .../realtimetranscriptioninputaudioflush.md | 8 + docs/models/realtimetranscriptionsession.md | 3 +- ...altimetranscriptionsessionupdatemessage.md | 9 + ...ltimetranscriptionsessionupdatepayload.md} | 6 +- docs/models/referencechunk.md | 8 +- docs/models/referencechunktype.md | 8 - docs/models/reprocessdocumentrequest.md | 9 - docs/models/response.md | 8 +- ...retrievemodelv1modelsmodelidgetrequest.md} | 2 +- .../{messageinputentryrole.md => role.md} | 2 +- docs/models/startfinetuningjobresponse.md | 19 - docs/models/systemmessage.md | 4 +- docs/models/textchunk.md | 8 +- 
docs/models/textchunktype.md | 8 - docs/models/thinkchunk.md | 6 +- .../{thinking.md => thinkchunkthinking.md} | 2 +- docs/models/thinkchunktype.md | 8 - docs/models/toolcallconfirmation.md | 9 + ...pdateftmodelin.md => toolconfiguration.md} | 7 +- docs/models/toolexecutionentry.md | 22 +- docs/models/toolexecutionentryobject.md | 8 - docs/models/toolexecutionentrytype.md | 8 - docs/models/toolexecutionstartedevent.md | 2 + docs/models/toolfilechunk.md | 14 +- docs/models/toolfilechunktype.md | 8 - docs/models/toolmessage.md | 4 +- docs/models/toolreferencechunk.md | 16 +- docs/models/toolreferencechunktype.md | 8 - docs/models/transcriptionsegmentchunk.md | 18 +- docs/models/transcriptionsegmentchunktype.md | 8 - .../models/transcriptionstreamsegmentdelta.md | 2 +- docs/models/transcriptionstreamtextdelta.md | 2 +- ...tmodelout.md => unarchivemodelresponse.md} | 2 +- docs/models/updateagentrequest.md | 16 +- ...questtool.md => updateagentrequesttool.md} | 2 +- docs/models/updatedocumentrequest.md | 9 +- docs/models/updatelibraryrequest.md | 8 +- docs/models/updatemodelrequest.md | 8 +- docs/models/updatemodelresponse.md | 19 - docs/models/usermessage.md | 4 +- ...rationout.md => wandbintegrationresult.md} | 2 +- docs/models/websearchpremiumtool.md | 7 +- docs/models/websearchtool.md | 7 +- docs/sdks/accesses/README.md | 24 +- docs/sdks/agents/README.md | 18 +- docs/sdks/batchjobs/README.md | 32 +- docs/sdks/betaagents/README.md | 110 +- docs/sdks/chat/README.md | 16 +- docs/sdks/classifiers/README.md | 33 +- docs/sdks/conversations/README.md | 95 +- docs/sdks/documents/README.md | 92 +- docs/sdks/embeddings/README.md | 6 +- docs/sdks/files/README.md | 42 +- docs/sdks/fim/README.md | 12 +- docs/sdks/finetuningjobs/README.md | 70 +- docs/sdks/libraries/README.md | 44 +- docs/sdks/models/README.md | 34 +- docs/sdks/ocr/README.md | 44 +- docs/sdks/transcriptions/README.md | 10 +- examples/mistral/audio/chat_streaming.py | 3 +- examples/mistral/audio/transcription_async.py | 3 +- .../audio/transcription_diarize_async.py | 3 +- .../audio/transcription_stream_async.py | 3 +- .../mistral/classifier/async_classifier.py | 10 +- .../async_batch_job_chat_completion_inline.py | 3 +- packages/azure/.speakeasy/gen.lock | 288 +- packages/azure/.speakeasy/gen.yaml | 20 +- .../{models => errors}/httpvalidationerror.md | 0 .../docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionstreamrequest.md | 2 +- packages/azure/docs/models/ocrrequest.md | 30 +- packages/azure/pylintrc | 7 +- .../src/mistralai/azure/client/__init__.py | 1 - .../src/mistralai/azure/client/_version.py | 6 +- .../src/mistralai/azure/client/basesdk.py | 30 +- .../azure/src/mistralai/azure/client/chat.py | 42 +- .../mistralai/azure/client/errors/__init__.py | 39 + .../{models => errors}/httpvalidationerror.py | 6 +- .../{models => errors}/mistralazureerror.py | 0 .../{models => errors}/no_response_error.py | 0 .../responsevalidationerror.py | 2 +- .../client/{models => errors}/sdkerror.py | 2 +- .../mistralai/azure/client/models/__init__.py | 65 +- .../azure/client/models/assistantmessage.py | 39 +- .../client/models/chatcompletionrequest.py | 91 +- .../models/chatcompletionstreamrequest.py | 91 +- .../azure/client/models/completionchunk.py | 19 +- .../models/completionresponsestreamchoice.py | 18 +- .../azure/client/models/contentchunk.py | 42 +- .../azure/client/models/deltamessage.py | 33 +- .../azure/client/models/documenturlchunk.py | 39 +- .../azure/client/models/filechunk.py | 27 +- 
.../mistralai/azure/client/models/function.py | 19 +- .../mistralai/azure/client/models/imageurl.py | 33 +- .../azure/client/models/imageurlchunk.py | 27 +- .../azure/client/models/jsonschema.py | 37 +- .../azure/client/models/ocrimageobject.py | 49 +- .../azure/client/models/ocrpageobject.py | 33 +- .../azure/client/models/ocrrequest.py | 81 +- .../azure/client/models/ocrresponse.py | 33 +- .../azure/client/models/ocrtableobject.py | 6 + .../azure/client/models/ocrusageinfo.py | 33 +- .../azure/client/models/prediction.py | 27 +- .../azure/client/models/referencechunk.py | 27 +- .../azure/client/models/responseformat.py | 33 +- .../azure/client/models/systemmessage.py | 8 +- .../models/systemmessagecontentchunks.py | 2 +- .../azure/client/models/textchunk.py | 8 +- .../azure/client/models/thinkchunk.py | 27 +- .../src/mistralai/azure/client/models/tool.py | 19 +- .../mistralai/azure/client/models/toolcall.py | 19 +- .../azure/client/models/toolchoice.py | 19 +- .../azure/client/models/toolmessage.py | 39 +- .../azure/client/models/usageinfo.py | 45 +- .../azure/client/models/usermessage.py | 26 +- .../azure/src/mistralai/azure/client/ocr.py | 22 +- .../mistralai/azure/client/utils/__init__.py | 56 +- .../azure/client/utils/dynamic_imports.py | 54 + .../azure/client/utils/eventstreaming.py | 126 +- .../src/mistralai/azure/client/utils/forms.py | 4 +- .../mistralai/azure/client/utils/retries.py | 14 +- .../mistralai/azure/client/utils/security.py | 2 + .../mistralai/azure/client/utils/unions.py | 32 + .../client/utils/unmarshal_json_response.py | 4 +- packages/gcp/.speakeasy/gen.lock | 260 +- packages/gcp/.speakeasy/gen.yaml | 20 +- .../{models => errors}/httpvalidationerror.md | 0 .../gcp/docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionstreamrequest.md | 2 +- packages/gcp/pylintrc | 7 +- .../gcp/src/mistralai/gcp/client/__init__.py | 1 - .../gcp/src/mistralai/gcp/client/_version.py | 6 +- .../gcp/src/mistralai/gcp/client/basesdk.py | 30 +- packages/gcp/src/mistralai/gcp/client/chat.py | 42 +- .../mistralai/gcp/client/errors/__init__.py | 39 + .../{models => errors}/httpvalidationerror.py | 6 +- .../{models => errors}/mistralgcperror.py | 0 .../{models => errors}/no_response_error.py | 0 .../responsevalidationerror.py | 2 +- .../gcp/client/{models => errors}/sdkerror.py | 2 +- packages/gcp/src/mistralai/gcp/client/fim.py | 42 +- .../mistralai/gcp/client/models/__init__.py | 65 +- .../gcp/client/models/assistantmessage.py | 39 +- .../client/models/chatcompletionrequest.py | 87 +- .../models/chatcompletionstreamrequest.py | 87 +- .../gcp/client/models/completionchunk.py | 19 +- .../models/completionresponsestreamchoice.py | 18 +- .../gcp/client/models/contentchunk.py | 42 +- .../gcp/client/models/deltamessage.py | 33 +- .../gcp/client/models/fimcompletionrequest.py | 71 +- .../models/fimcompletionstreamrequest.py | 71 +- .../mistralai/gcp/client/models/function.py | 19 +- .../mistralai/gcp/client/models/imageurl.py | 33 +- .../gcp/client/models/imageurlchunk.py | 8 +- .../mistralai/gcp/client/models/jsonschema.py | 37 +- .../mistralai/gcp/client/models/prediction.py | 27 +- .../gcp/client/models/referencechunk.py | 27 +- .../gcp/client/models/responseformat.py | 33 +- .../gcp/client/models/systemmessage.py | 8 +- .../models/systemmessagecontentchunks.py | 2 +- .../mistralai/gcp/client/models/textchunk.py | 8 +- .../mistralai/gcp/client/models/thinkchunk.py | 27 +- .../src/mistralai/gcp/client/models/tool.py | 19 +- .../mistralai/gcp/client/models/toolcall.py | 19 +- 
.../mistralai/gcp/client/models/toolchoice.py | 19 +- .../gcp/client/models/toolmessage.py | 39 +- .../mistralai/gcp/client/models/usageinfo.py | 45 +- .../gcp/client/models/usermessage.py | 26 +- .../mistralai/gcp/client/utils/__init__.py | 56 +- .../gcp/client/utils/dynamic_imports.py | 54 + .../gcp/client/utils/eventstreaming.py | 126 +- .../src/mistralai/gcp/client/utils/forms.py | 4 +- .../src/mistralai/gcp/client/utils/retries.py | 14 +- .../mistralai/gcp/client/utils/security.py | 2 + .../src/mistralai/gcp/client/utils/unions.py | 32 + .../client/utils/unmarshal_json_response.py | 4 +- pylintrc | 2 +- src/mistralai/client/__init__.py | 1 - src/mistralai/client/_version.py | 6 +- src/mistralai/client/accesses.py | 102 +- src/mistralai/client/agents.py | 162 +- src/mistralai/client/basesdk.py | 36 +- src/mistralai/client/batch_jobs.py | 146 +- src/mistralai/client/beta_agents.py | 389 ++- src/mistralai/client/chat.py | 154 +- src/mistralai/client/classifiers.py | 123 +- src/mistralai/client/conversations.py | 608 ++-- src/mistralai/client/documents.py | 344 +- src/mistralai/client/embeddings.py | 41 +- src/mistralai/client/errors/__init__.py | 40 + .../{models => errors}/httpvalidationerror.py | 8 +- .../client/{models => errors}/mistralerror.py | 2 +- .../{models => errors}/no_response_error.py | 2 +- .../responsevalidationerror.py | 4 +- .../client/{models => errors}/sdkerror.py | 4 +- src/mistralai/client/files.py | 184 +- src/mistralai/client/fim.py | 62 +- src/mistralai/client/fine_tuning_jobs.py | 241 +- src/mistralai/client/libraries.py | 190 +- src/mistralai/client/models/__init__.py | 1903 +++++------ src/mistralai/client/models/agent.py | 115 +- .../client/models/agentconversation.py | 57 +- .../client/models/agenthandoffdoneevent.py | 27 +- .../client/models/agenthandoffentry.py | 65 +- .../client/models/agenthandoffstartedevent.py | 27 +- ...api_v1_agents_create_or_update_aliasop.py} | 6 +- ...=> agents_api_v1_agents_delete_aliasop.py} | 6 +- ...op.py => agents_api_v1_agents_deleteop.py} | 6 +- ... => agents_api_v1_agents_get_versionop.py} | 6 +- .../models/agents_api_v1_agents_getop.py | 66 + ...s_api_v1_agents_list_version_aliasesop.py} | 6 +- ...> agents_api_v1_agents_list_versionsop.py} | 25 +- ...tsop.py => agents_api_v1_agents_listop.py} | 68 +- ... agents_api_v1_agents_update_versionop.py} | 6 +- ...op.py => agents_api_v1_agents_updateop.py} | 14 +- ...s_api_v1_conversations_append_streamop.py} | 6 +- ...> agents_api_v1_conversations_appendop.py} | 6 +- ...> agents_api_v1_conversations_deleteop.py} | 6 +- ...y => agents_api_v1_conversations_getop.py} | 6 +- ... agents_api_v1_conversations_historyop.py} | 6 +- ... => agents_api_v1_conversations_listop.py} | 47 +- ...agents_api_v1_conversations_messagesop.py} | 6 +- ..._api_v1_conversations_restart_streamop.py} | 6 +- ... 
agents_api_v1_conversations_restartop.py} | 6 +- .../client/models/agentscompletionrequest.py | 74 +- .../models/agentscompletionstreamrequest.py | 74 +- .../client/models/archiveftmodelout.py | 27 - .../client/models/archivemodelresponse.py | 50 + .../client/models/assistantmessage.py | 56 +- src/mistralai/client/models/audiochunk.py | 8 +- .../models/audiotranscriptionrequest.py | 61 +- .../models/audiotranscriptionrequeststream.py | 61 +- src/mistralai/client/models/basemodelcard.py | 77 +- src/mistralai/client/models/batcherror.py | 19 +- .../models/{batchjobout.py => batchjob.py} | 87 +- src/mistralai/client/models/batchjobsout.py | 28 - src/mistralai/client/models/batchrequest.py | 33 +- .../client/models/cancelfinetuningjobop.py | 43 - .../models/chatclassificationrequest.py | 7 +- .../client/models/chatcompletionrequest.py | 89 +- .../models/chatcompletionstreamrequest.py | 89 +- .../client/models/chatmoderationrequest.py | 6 + .../{checkpointout.py => checkpoint.py} | 12 +- .../client/models/classificationrequest.py | 37 +- .../client/models/classifierdetailedjobout.py | 169 - ...odelout.py => classifierfinetunedmodel.py} | 74 +- ...erjobout.py => classifierfinetuningjob.py} | 127 +- .../models/classifierfinetuningjobdetails.py | 197 ++ ...ssifiertargetin.py => classifiertarget.py} | 39 +- ...targetout.py => classifiertargetresult.py} | 6 +- .../models/classifiertrainingparameters.py | 57 +- .../models/classifiertrainingparametersin.py | 92 - .../client/models/codeinterpretertool.py | 48 +- src/mistralai/client/models/completionargs.py | 79 +- .../client/models/completionchunk.py | 19 +- .../client/models/completiondetailedjobout.py | 176 - ...odelout.py => completionfinetunedmodel.py} | 65 +- ...onjobout.py => completionfinetuningjob.py} | 154 +- .../models/completionfinetuningjobdetails.py | 216 ++ .../models/completionresponsestreamchoice.py | 18 +- .../models/completiontrainingparameters.py | 67 +- .../models/completiontrainingparametersin.py | 97 - src/mistralai/client/models/contentchunk.py | 55 +- .../models/conversationappendrequest.py | 53 +- .../models/conversationappendstreamrequest.py | 53 +- .../client/models/conversationevents.py | 46 +- .../client/models/conversationhistory.py | 51 +- .../client/models/conversationmessages.py | 43 +- .../client/models/conversationrequest.py | 89 +- .../client/models/conversationresponse.py | 49 +- .../models/conversationrestartrequest.py | 56 +- .../conversationrestartstreamrequest.py | 56 +- .../models/conversationstreamrequest.py | 89 +- .../client/models/conversationthinkchunk.py | 65 + .../client/models/conversationusageinfo.py | 47 +- ...eationrequest.py => createagentrequest.py} | 79 +- ...batchjobin.py => createbatchjobrequest.py} | 57 +- ...uploadfileout.py => createfileresponse.py} | 43 +- .../client/models/createfinetuningjobop.py | 33 - ...jobin.py => createfinetuningjobrequest.py} | 122 +- .../{libraryin.py => createlibraryrequest.py} | 39 +- ...lete_model_v1_models_model_id_deleteop.py} | 6 +- ...deletefileout.py => deletefileresponse.py} | 6 +- src/mistralai/client/models/deletemodelout.py | 19 +- src/mistralai/client/models/deltamessage.py | 33 +- .../models/{documentout.py => document.py} | 91 +- .../client/models/documentlibrarytool.py | 48 +- .../client/models/documenturlchunk.py | 57 +- .../client/models/embeddingrequest.py | 44 +- .../client/models/embeddingresponsedata.py | 19 +- .../client/models/{eventout.py => event.py} | 39 +- src/mistralai/client/models/file.py | 19 +- 
src/mistralai/client/models/filechunk.py | 27 +- ...p.py => files_api_routes_delete_fileop.py} | 6 +- ...py => files_api_routes_download_fileop.py} | 6 +- ...y => files_api_routes_get_signed_urlop.py} | 25 +- ...op.py => files_api_routes_list_filesop.py} | 61 +- ...py => files_api_routes_retrieve_fileop.py} | 6 +- ...p.py => files_api_routes_upload_fileop.py} | 21 +- src/mistralai/client/models/fileschema.py | 37 +- .../client/models/fimcompletionrequest.py | 71 +- .../models/fimcompletionstreamrequest.py | 71 +- .../models/finetunedmodelcapabilities.py | 52 + .../client/models/ftmodelcapabilitiesout.py | 27 - src/mistralai/client/models/ftmodelcard.py | 79 +- src/mistralai/client/models/function.py | 19 +- .../client/models/functioncallentry.py | 96 +- .../client/models/functioncallevent.py | 66 +- .../client/models/functionresultentry.py | 65 +- src/mistralai/client/models/functiontool.py | 8 +- src/mistralai/client/models/getagentop.py | 69 - .../client/models/getdocumenttextcontentop.py | 22 - ...{retrievefileout.py => getfileresponse.py} | 43 +- .../client/models/getfinetuningjobop.py | 43 - ...lesignedurl.py => getsignedurlresponse.py} | 6 +- ...ubrepositoryout.py => githubrepository.py} | 45 +- .../client/models/githubrepositoryin.py | 39 +- src/mistralai/client/models/imagedetail.py | 16 + .../client/models/imagegenerationtool.py | 48 +- src/mistralai/client/models/imageurl.py | 38 +- src/mistralai/client/models/imageurlchunk.py | 42 +- src/mistralai/client/models/inputentries.py | 8 +- src/mistralai/client/models/inputs.py | 44 +- .../{jobmetadataout.py => jobmetadata.py} | 75 +- ...bs_api_routes_batch_cancel_batch_jobop.py} | 6 +- ... jobs_api_routes_batch_get_batch_jobop.py} | 39 +- ...jobs_api_routes_batch_get_batch_jobsop.py} | 63 +- ...fine_tuning_archive_fine_tuned_modelop.py} | 6 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 78 + ...es_fine_tuning_create_fine_tuning_jobop.py | 70 + ...outes_fine_tuning_get_fine_tuning_jobop.py | 76 + ...tes_fine_tuning_get_fine_tuning_jobsop.py} | 87 +- ...tes_fine_tuning_start_fine_tuning_jobop.py | 74 + ...ne_tuning_unarchive_fine_tuned_modelop.py} | 6 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 83 + src/mistralai/client/models/jobsout.py | 40 - src/mistralai/client/models/jsonschema.py | 37 +- ...jobmetadataout.py => legacyjobmetadata.py} | 93 +- ...elibraryop.py => libraries_delete_v1op.py} | 6 +- ....py => libraries_documents_delete_v1op.py} | 6 +- ...ents_get_extracted_text_signed_url_v1op.py | 22 + ...libraries_documents_get_signed_url_v1op.py | 22 + ...=> libraries_documents_get_status_v1op.py} | 6 +- ...raries_documents_get_text_content_v1op.py} | 6 +- ...sop.py => libraries_documents_get_v1op.py} | 6 +- ...op.py => libraries_documents_list_v1op.py} | 55 +- ... 
=> libraries_documents_reprocess_v1op.py} | 6 +- ....py => libraries_documents_update_v1op.py} | 14 +- ....py => libraries_documents_upload_v1op.py} | 6 +- ...{getlibraryop.py => libraries_get_v1op.py} | 6 +- ...ssop.py => libraries_share_create_v1op.py} | 6 +- ...ssop.py => libraries_share_delete_v1op.py} | 6 +- ...ssesop.py => libraries_share_list_v1op.py} | 6 +- ...elibraryop.py => libraries_update_v1op.py} | 14 +- .../models/{libraryout.py => library.py} | 79 +- .../client/models/libraryinupdate.py | 54 - .../client/models/listbatchjobsresponse.py | 51 + ...ocumentout.py => listdocumentsresponse.py} | 12 +- .../{listfilesout.py => listfilesresponse.py} | 39 +- .../models/listfinetuningjobsresponse.py | 100 + .../client/models/listlibrariesresponse.py | 16 + src/mistralai/client/models/listlibraryout.py | 16 - .../models/messageinputcontentchunks.py | 15 +- .../client/models/messageinputentry.py | 80 +- .../models/messageoutputcontentchunks.py | 9 +- .../client/models/messageoutputentry.py | 103 +- .../client/models/messageoutputevent.py | 60 +- .../client/models/{metricout.py => metric.py} | 39 +- .../client/models/modelcapabilities.py | 32 +- .../client/models/modelconversation.py | 108 +- src/mistralai/client/models/modellist.py | 52 +- .../client/models/moderationobject.py | 19 +- src/mistralai/client/models/ocrimageobject.py | 49 +- src/mistralai/client/models/ocrpageobject.py | 33 +- src/mistralai/client/models/ocrrequest.py | 93 +- src/mistralai/client/models/ocrresponse.py | 31 +- src/mistralai/client/models/ocrtableobject.py | 6 + src/mistralai/client/models/ocrusageinfo.py | 33 +- .../client/models/outputcontentchunks.py | 9 +- src/mistralai/client/models/prediction.py | 27 +- .../models/realtimetranscriptionerror.py | 27 +- .../realtimetranscriptioninputaudioappend.py | 52 + .../realtimetranscriptioninputaudioend.py | 47 + .../realtimetranscriptioninputaudioflush.py | 47 + .../models/realtimetranscriptionsession.py | 39 +- .../realtimetranscriptionsessioncreated.py | 27 +- .../realtimetranscriptionsessionupdated.py | 27 +- ...altimetranscriptionsessionupdatemessage.py | 54 + ...altimetranscriptionsessionupdatepayload.py | 54 + src/mistralai/client/models/referencechunk.py | 42 +- .../client/models/reprocessdocumentop.py | 22 - .../client/models/responsedoneevent.py | 27 +- .../client/models/responseerrorevent.py | 27 +- src/mistralai/client/models/responseformat.py | 33 +- .../client/models/responsestartedevent.py | 27 +- ...retrieve_model_v1_models_model_id_getop.py | 64 + .../client/models/retrievemodelop.py | 36 - src/mistralai/client/models/security.py | 19 +- src/mistralai/client/models/sharingdelete.py | 33 +- src/mistralai/client/models/sharingin.py | 33 +- src/mistralai/client/models/sharingout.py | 33 +- .../client/models/startfinetuningjobop.py | 41 - src/mistralai/client/models/systemmessage.py | 8 +- src/mistralai/client/models/textchunk.py | 40 +- src/mistralai/client/models/thinkchunk.py | 52 +- src/mistralai/client/models/tool.py | 19 +- src/mistralai/client/models/toolcall.py | 19 +- .../client/models/toolcallconfirmation.py | 24 + src/mistralai/client/models/toolchoice.py | 19 +- .../client/models/toolconfiguration.py | 53 + .../client/models/toolexecutiondeltaevent.py | 27 +- .../client/models/toolexecutiondoneevent.py | 27 +- .../client/models/toolexecutionentry.py | 82 +- .../models/toolexecutionstartedevent.py | 48 +- src/mistralai/client/models/toolfilechunk.py | 54 +- src/mistralai/client/models/toolmessage.py | 49 +- 
.../client/models/toolreferencechunk.py | 55 +- src/mistralai/client/models/trainingfile.py | 19 +- .../client/models/transcriptionresponse.py | 33 +- .../models/transcriptionsegmentchunk.py | 58 +- .../client/models/transcriptionstreamdone.py | 41 +- .../models/transcriptionstreamevents.py | 36 +- .../models/transcriptionstreamlanguage.py | 8 +- .../models/transcriptionstreamsegmentdelta.py | 47 +- .../models/transcriptionstreamtextdelta.py | 8 +- .../client/models/unarchiveftmodelout.py | 27 - .../client/models/unarchivemodelresponse.py | 50 + ...updaterequest.py => updateagentrequest.py} | 97 +- ...ntupdatein.py => updatedocumentrequest.py} | 39 +- .../client/models/updateftmodelin.py | 54 - .../client/models/updatelibraryrequest.py | 49 + src/mistralai/client/models/updatemodelop.py | 43 - .../client/models/updatemodelrequest.py | 49 + src/mistralai/client/models/usageinfo.py | 45 +- src/mistralai/client/models/usermessage.py | 26 +- .../client/models/wandbintegration.py | 39 +- ...rationout.py => wandbintegrationresult.py} | 45 +- .../client/models/websearchpremiumtool.py | 48 +- src/mistralai/client/models/websearchtool.py | 48 +- src/mistralai/client/models_.py | 178 +- src/mistralai/client/ocr.py | 62 +- src/mistralai/client/sdk.py | 11 +- src/mistralai/client/transcriptions.py | 54 +- src/mistralai/client/utils/__init__.py | 56 +- src/mistralai/client/utils/dynamic_imports.py | 55 + src/mistralai/client/utils/eventstreaming.py | 126 +- src/mistralai/client/utils/forms.py | 4 +- src/mistralai/client/utils/retries.py | 14 +- src/mistralai/client/utils/security.py | 2 + src/mistralai/client/utils/unions.py | 33 + .../client/utils/unmarshal_json_response.py | 4 +- src/mistralai/extra/run/context.py | 8 +- 658 files changed, 13796 insertions(+), 11219 deletions(-) rename docs/{models => errors}/httpvalidationerror.md (100%) delete mode 100644 docs/models/agentconversationobject.md delete mode 100644 docs/models/agentcreationrequest.md delete mode 100644 docs/models/agenthandoffentryobject.md delete mode 100644 docs/models/agenthandoffentrytype.md delete mode 100644 docs/models/agentobject.md rename docs/models/{createorupdateagentaliasrequest.md => agentsapiv1agentscreateorupdatealiasrequest.md} (90%) rename docs/models/{deleteagentaliasrequest.md => agentsapiv1agentsdeletealiasrequest.md} (90%) rename docs/models/{deleteagentrequest.md => agentsapiv1agentsdeleterequest.md} (89%) rename docs/models/{getagentagentversion.md => agentsapiv1agentsgetagentversion.md} (79%) create mode 100644 docs/models/agentsapiv1agentsgetrequest.md rename docs/models/{getagentversionrequest.md => agentsapiv1agentsgetversionrequest.md} (90%) rename docs/models/{listagentsrequest.md => agentsapiv1agentslistrequest.md} (98%) rename docs/models/{listagentaliasesrequest.md => agentsapiv1agentslistversionaliasesrequest.md} (85%) rename docs/models/{listagentversionsrequest.md => agentsapiv1agentslistversionsrequest.md} (94%) create mode 100644 docs/models/agentsapiv1agentsupdaterequest.md rename docs/models/{updateagentversionrequest.md => agentsapiv1agentsupdateversionrequest.md} (89%) rename docs/models/{appendconversationrequest.md => agentsapiv1conversationsappendrequest.md} (96%) rename docs/models/{appendconversationstreamrequest.md => agentsapiv1conversationsappendstreamrequest.md} (96%) rename docs/models/{getconversationrequest.md => agentsapiv1conversationsdeleterequest.md} (95%) rename docs/models/{deleteconversationrequest.md => agentsapiv1conversationsgetrequest.md} (95%) rename 
docs/models/{getconversationhistoryrequest.md => agentsapiv1conversationshistoryrequest.md} (94%) rename docs/models/{listconversationsrequest.md => agentsapiv1conversationslistrequest.md} (92%) rename docs/models/{listconversationsresponse.md => agentsapiv1conversationslistresponse.md} (84%) rename docs/models/{getconversationmessagesrequest.md => agentsapiv1conversationsmessagesrequest.md} (94%) rename docs/models/{restartconversationrequest.md => agentsapiv1conversationsrestartrequest.md} (96%) rename docs/models/{restartconversationstreamrequest.md => agentsapiv1conversationsrestartstreamrequest.md} (96%) rename docs/models/{unarchiveftmodelout.md => archivemodelresponse.md} (96%) delete mode 100644 docs/models/assistantmessagerole.md rename docs/models/{batchjobout.md => batchjob.md} (99%) delete mode 100644 docs/models/batchjobsout.md delete mode 100644 docs/models/cancelfinetuningjobresponse.md rename docs/models/{checkpointout.md => checkpoint.md} (96%) delete mode 100644 docs/models/classifierdetailedjoboutintegration.md create mode 100644 docs/models/classifierfinetunedmodel.md rename docs/models/{classifierjobout.md => classifierfinetuningjob.md} (97%) rename docs/models/{classifierdetailedjobout.md => classifierfinetuningjobdetails.md} (94%) create mode 100644 docs/models/classifierfinetuningjobdetailsintegration.md rename docs/models/{classifierdetailedjoboutstatus.md => classifierfinetuningjobdetailsstatus.md} (94%) create mode 100644 docs/models/classifierfinetuningjobintegration.md rename docs/models/{completionjoboutstatus.md => classifierfinetuningjobstatus.md} (95%) delete mode 100644 docs/models/classifierftmodelout.md delete mode 100644 docs/models/classifierjoboutintegration.md rename docs/models/{classifiertargetin.md => classifiertarget.md} (99%) rename docs/models/{classifiertargetout.md => classifiertargetresult.md} (98%) delete mode 100644 docs/models/classifiertrainingparametersin.md delete mode 100644 docs/models/completiondetailedjoboutintegration.md delete mode 100644 docs/models/completiondetailedjoboutrepository.md create mode 100644 docs/models/completionfinetunedmodel.md rename docs/models/{completionjobout.md => completionfinetuningjob.md} (97%) rename docs/models/{completiondetailedjobout.md => completionfinetuningjobdetails.md} (94%) create mode 100644 docs/models/completionfinetuningjobdetailsintegration.md create mode 100644 docs/models/completionfinetuningjobdetailsrepository.md rename docs/models/{completiondetailedjoboutstatus.md => completionfinetuningjobdetailsstatus.md} (94%) create mode 100644 docs/models/completionfinetuningjobintegration.md create mode 100644 docs/models/completionfinetuningjobrepository.md rename docs/models/{classifierjoboutstatus.md => completionfinetuningjobstatus.md} (95%) delete mode 100644 docs/models/completionftmodelout.md delete mode 100644 docs/models/completionjoboutintegration.md delete mode 100644 docs/models/completionjoboutrepository.md delete mode 100644 docs/models/completiontrainingparametersin.md create mode 100644 docs/models/confirmation.md delete mode 100644 docs/models/conversationhistoryobject.md delete mode 100644 docs/models/conversationmessagesobject.md delete mode 100644 docs/models/conversationresponseobject.md create mode 100644 docs/models/conversationthinkchunk.md create mode 100644 docs/models/conversationthinkchunkthinking.md rename docs/models/{agentupdaterequest.md => createagentrequest.md} (80%) rename docs/models/{agentupdaterequesttool.md => createagentrequesttool.md} (96%) rename 
docs/models/{batchjobin.md => createbatchjobrequest.md} (99%) rename docs/models/{uploadfileout.md => createfileresponse.md} (99%) rename docs/models/{jobin.md => createfinetuningjobrequest.md} (97%) rename docs/models/{jobinintegration.md => createfinetuningjobrequestintegration.md} (74%) rename docs/models/{jobinrepository.md => createfinetuningjobrequestrepository.md} (75%) delete mode 100644 docs/models/createfinetuningjobresponse.md rename docs/models/{libraryin.md => createlibraryrequest.md} (95%) rename docs/models/{deletefileout.md => deletefileresponse.md} (97%) rename docs/models/{deletemodelrequest.md => deletemodelv1modelsmodeliddeleterequest.md} (94%) delete mode 100644 docs/models/documentout.md create mode 100644 docs/models/documentunion.md delete mode 100644 docs/models/documentupdatein.md delete mode 100644 docs/models/documenturlchunktype.md rename docs/models/{eventout.md => event.md} (98%) rename docs/models/{downloadfilerequest.md => filesapiroutesdeletefilerequest.md} (88%) rename docs/models/{retrievefilerequest.md => filesapiroutesdownloadfilerequest.md} (88%) rename docs/models/{getfilesignedurlrequest.md => filesapiroutesgetsignedurlrequest.md} (96%) rename docs/models/{listfilesrequest.md => filesapirouteslistfilesrequest.md} (98%) rename docs/models/{deletefilerequest.md => filesapiroutesretrievefilerequest.md} (88%) rename docs/models/{ftmodelcapabilitiesout.md => finetunedmodelcapabilities.md} (95%) create mode 100644 docs/models/functioncallentryconfirmationstatus.md delete mode 100644 docs/models/functioncallentryobject.md delete mode 100644 docs/models/functioncallentrytype.md create mode 100644 docs/models/functioncalleventconfirmationstatus.md delete mode 100644 docs/models/functionresultentryobject.md delete mode 100644 docs/models/functionresultentrytype.md delete mode 100644 docs/models/getdocumenttextcontentrequest.md rename docs/models/{retrievefileout.md => getfileresponse.md} (99%) delete mode 100644 docs/models/getfinetuningjobresponse.md rename docs/models/{filesignedurl.md => getsignedurlresponse.md} (92%) rename docs/models/{githubrepositoryout.md => githubrepository.md} (97%) create mode 100644 docs/models/imagedetail.md delete mode 100644 docs/models/imageurlchunktype.md delete mode 100644 docs/models/inputsmessage.md delete mode 100644 docs/models/instructrequestinputs.md rename docs/models/{jobmetadataout.md => jobmetadata.md} (98%) rename docs/models/{cancelbatchjobrequest.md => jobsapiroutesbatchcancelbatchjobrequest.md} (86%) rename docs/models/{getbatchjobrequest.md => jobsapiroutesbatchgetbatchjobrequest.md} (92%) rename docs/models/{listbatchjobsrequest.md => jobsapiroutesbatchgetbatchjobsrequest.md} (98%) rename docs/models/{archivemodelrequest.md => jobsapiroutesfinetuningarchivefinetunedmodelrequest.md} (93%) rename docs/models/{cancelfinetuningjobrequest.md => jobsapiroutesfinetuningcancelfinetuningjobrequest.md} (88%) create mode 100644 docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md rename docs/models/{getfinetuningjobrequest.md => jobsapiroutesfinetuninggetfinetuningjobrequest.md} (89%) create mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md rename docs/models/{listfinetuningjobsstatus.md => jobsapiroutesfinetuninggetfinetuningjobsstatus.md} (94%) rename docs/models/{startfinetuningjobrequest.md => 
jobsapiroutesfinetuningstartfinetuningjobrequest.md} (84%) create mode 100644 docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md rename docs/models/{unarchivemodelrequest.md => jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md} (92%) create mode 100644 docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md delete mode 100644 docs/models/jobsout.md delete mode 100644 docs/models/jobsoutdata.md rename docs/models/{legacyjobmetadataout.md => legacyjobmetadata.md} (99%) rename docs/models/{getlibraryrequest.md => librariesdeletev1request.md} (90%) rename docs/models/{getdocumentstatusrequest.md => librariesdocumentsdeletev1request.md} (90%) create mode 100644 docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md create mode 100644 docs/models/librariesdocumentsgetsignedurlv1request.md rename docs/models/{getdocumentrequest.md => librariesdocumentsgetstatusv1request.md} (90%) rename docs/models/{getdocumentextractedtextsignedurlrequest.md => librariesdocumentsgettextcontentv1request.md} (89%) rename docs/models/{getdocumentsignedurlrequest.md => librariesdocumentsgetv1request.md} (91%) rename docs/models/{listdocumentsrequest.md => librariesdocumentslistv1request.md} (96%) rename docs/models/{deletedocumentrequest.md => librariesdocumentsreprocessv1request.md} (90%) create mode 100644 docs/models/librariesdocumentsupdatev1request.md rename docs/models/{uploaddocumentrequest.md => librariesdocumentsuploadv1request.md} (96%) rename docs/models/{deletelibraryrequest.md => librariesgetv1request.md} (91%) rename docs/models/{updateorcreatelibraryaccessrequest.md => librariessharecreatev1request.md} (95%) rename docs/models/{deletelibraryaccessrequest.md => librariessharedeletev1request.md} (96%) rename docs/models/{listlibraryaccessesrequest.md => librariessharelistv1request.md} (90%) create mode 100644 docs/models/librariesupdatev1request.md rename docs/models/{libraryout.md => library.md} (99%) delete mode 100644 docs/models/libraryinupdate.md create mode 100644 docs/models/listbatchjobsresponse.md rename docs/models/{listdocumentout.md => listdocumentsresponse.md} (90%) rename docs/models/{listfilesout.md => listfilesresponse.md} (98%) delete mode 100644 docs/models/listfinetuningjobsrequest.md create mode 100644 docs/models/listfinetuningjobsresponse.md create mode 100644 docs/models/listfinetuningjobsresponsedata.md create mode 100644 docs/models/listlibrariesresponse.md delete mode 100644 docs/models/listlibraryout.md delete mode 100644 docs/models/messageinputentryobject.md delete mode 100644 docs/models/messageinputentrytype.md delete mode 100644 docs/models/messageoutputentryobject.md delete mode 100644 docs/models/messageoutputentryrole.md delete mode 100644 docs/models/messageoutputentrytype.md delete mode 100644 docs/models/messageoutputeventrole.md rename docs/models/{metricout.md => metric.md} (98%) delete mode 100644 docs/models/modelconversationobject.md create mode 100644 docs/models/realtimetranscriptioninputaudioappend.md create mode 100644 docs/models/realtimetranscriptioninputaudioend.md create mode 100644 docs/models/realtimetranscriptioninputaudioflush.md create mode 100644 docs/models/realtimetranscriptionsessionupdatemessage.md rename docs/models/{getagentrequest.md => realtimetranscriptionsessionupdatepayload.md} (57%) delete mode 100644 docs/models/referencechunktype.md delete mode 100644 docs/models/reprocessdocumentrequest.md rename 
docs/models/{retrievemodelrequest.md => retrievemodelv1modelsmodelidgetrequest.md} (94%) rename docs/models/{messageinputentryrole.md => role.md} (84%) delete mode 100644 docs/models/startfinetuningjobresponse.md delete mode 100644 docs/models/textchunktype.md rename docs/models/{thinking.md => thinkchunkthinking.md} (90%) delete mode 100644 docs/models/thinkchunktype.md create mode 100644 docs/models/toolcallconfirmation.md rename docs/models/{updateftmodelin.md => toolconfiguration.md} (54%) delete mode 100644 docs/models/toolexecutionentryobject.md delete mode 100644 docs/models/toolexecutionentrytype.md delete mode 100644 docs/models/toolfilechunktype.md delete mode 100644 docs/models/toolreferencechunktype.md delete mode 100644 docs/models/transcriptionsegmentchunktype.md rename docs/models/{archiveftmodelout.md => unarchivemodelresponse.md} (96%) rename docs/models/{agentcreationrequesttool.md => updateagentrequesttool.md} (95%) delete mode 100644 docs/models/updatemodelresponse.md rename docs/models/{wandbintegrationout.md => wandbintegrationresult.md} (98%) rename packages/azure/docs/{models => errors}/httpvalidationerror.md (100%) create mode 100644 packages/azure/src/mistralai/azure/client/errors/__init__.py rename packages/azure/src/mistralai/azure/client/{models => errors}/httpvalidationerror.py (76%) rename packages/azure/src/mistralai/azure/client/{models => errors}/mistralazureerror.py (100%) rename packages/azure/src/mistralai/azure/client/{models => errors}/no_response_error.py (100%) rename packages/azure/src/mistralai/azure/client/{models => errors}/responsevalidationerror.py (92%) rename packages/azure/src/mistralai/azure/client/{models => errors}/sdkerror.py (95%) create mode 100644 packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py create mode 100644 packages/azure/src/mistralai/azure/client/utils/unions.py rename packages/gcp/docs/{models => errors}/httpvalidationerror.md (100%) create mode 100644 packages/gcp/src/mistralai/gcp/client/errors/__init__.py rename packages/gcp/src/mistralai/gcp/client/{models => errors}/httpvalidationerror.py (77%) rename packages/gcp/src/mistralai/gcp/client/{models => errors}/mistralgcperror.py (100%) rename packages/gcp/src/mistralai/gcp/client/{models => errors}/no_response_error.py (100%) rename packages/gcp/src/mistralai/gcp/client/{models => errors}/responsevalidationerror.py (92%) rename packages/gcp/src/mistralai/gcp/client/{models => errors}/sdkerror.py (95%) create mode 100644 packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py create mode 100644 packages/gcp/src/mistralai/gcp/client/utils/unions.py create mode 100644 src/mistralai/client/errors/__init__.py rename src/mistralai/client/{models => errors}/httpvalidationerror.py (75%) rename src/mistralai/client/{models => errors}/mistralerror.py (96%) rename src/mistralai/client/{models => errors}/no_response_error.py (93%) rename src/mistralai/client/{models => errors}/responsevalidationerror.py (90%) rename src/mistralai/client/{models => errors}/sdkerror.py (94%) rename src/mistralai/client/models/{createorupdateagentaliasop.py => agents_api_v1_agents_create_or_update_aliasop.py} (80%) rename src/mistralai/client/models/{deleteagentaliasop.py => agents_api_v1_agents_delete_aliasop.py} (78%) rename src/mistralai/client/models/{listagentaliasesop.py => agents_api_v1_agents_deleteop.py} (74%) rename src/mistralai/client/models/{getagentversionop.py => agents_api_v1_agents_get_versionop.py} (78%) create mode 100644 
src/mistralai/client/models/agents_api_v1_agents_getop.py
 rename src/mistralai/client/models/{deleteagentop.py => agents_api_v1_agents_list_version_aliasesop.py} (71%)
 rename src/mistralai/client/models/{listagentversionsop.py => agents_api_v1_agents_list_versionsop.py} (56%)
 rename src/mistralai/client/models/{listagentsop.py => agents_api_v1_agents_listop.py} (70%)
 rename src/mistralai/client/models/{updateagentversionop.py => agents_api_v1_agents_update_versionop.py} (78%)
 rename src/mistralai/client/models/{updateagentop.py => agents_api_v1_agents_updateop.py} (62%)
 rename src/mistralai/client/models/{appendconversationstreamop.py => agents_api_v1_conversations_append_streamop.py} (85%)
 rename src/mistralai/client/models/{appendconversationop.py => agents_api_v1_conversations_appendop.py} (85%)
 rename src/mistralai/client/models/{deleteconversationop.py => agents_api_v1_conversations_deleteop.py} (78%)
 rename src/mistralai/client/models/{getconversationop.py => agents_api_v1_conversations_getop.py} (88%)
 rename src/mistralai/client/models/{getconversationhistoryop.py => agents_api_v1_conversations_historyop.py} (78%)
 rename src/mistralai/client/models/{listconversationsop.py => agents_api_v1_conversations_listop.py} (59%)
 rename src/mistralai/client/models/{getconversationmessagesop.py => agents_api_v1_conversations_messagesop.py} (78%)
 rename src/mistralai/client/models/{restartconversationstreamop.py => agents_api_v1_conversations_restart_streamop.py} (85%)
 rename src/mistralai/client/models/{restartconversationop.py => agents_api_v1_conversations_restartop.py} (85%)
 delete mode 100644 src/mistralai/client/models/archiveftmodelout.py
 create mode 100644 src/mistralai/client/models/archivemodelresponse.py
 rename src/mistralai/client/models/{batchjobout.py => batchjob.py} (64%)
 delete mode 100644 src/mistralai/client/models/batchjobsout.py
 delete mode 100644 src/mistralai/client/models/cancelfinetuningjobop.py
 rename src/mistralai/client/models/{checkpointout.py => checkpoint.py} (81%)
 delete mode 100644 src/mistralai/client/models/classifierdetailedjobout.py
 rename src/mistralai/client/models/{classifierftmodelout.py => classifierfinetunedmodel.py} (56%)
 rename src/mistralai/client/models/{classifierjobout.py => classifierfinetuningjob.py} (63%)
 create mode 100644 src/mistralai/client/models/classifierfinetuningjobdetails.py
 rename src/mistralai/client/models/{classifiertargetin.py => classifiertarget.py} (55%)
 rename src/mistralai/client/models/{classifiertargetout.py => classifiertargetresult.py} (79%)
 delete mode 100644 src/mistralai/client/models/classifiertrainingparametersin.py
 delete mode 100644 src/mistralai/client/models/completiondetailedjobout.py
 rename src/mistralai/client/models/{completionftmodelout.py => completionfinetunedmodel.py} (60%)
 rename src/mistralai/client/models/{completionjobout.py => completionfinetuningjob.py} (56%)
 create mode 100644 src/mistralai/client/models/completionfinetuningjobdetails.py
 delete mode 100644 src/mistralai/client/models/completiontrainingparametersin.py
 create mode 100644 src/mistralai/client/models/conversationthinkchunk.py
 rename src/mistralai/client/models/{agentcreationrequest.py => createagentrequest.py} (66%)
 rename src/mistralai/client/models/{batchjobin.py => createbatchjobrequest.py} (76%)
 rename src/mistralai/client/models/{uploadfileout.py => createfileresponse.py} (69%)
 delete mode 100644 src/mistralai/client/models/createfinetuningjobop.py
 rename src/mistralai/client/models/{jobin.py => createfinetuningjobrequest.py} (56%)
 rename src/mistralai/client/models/{libraryin.py => createlibraryrequest.py} (50%)
 rename src/mistralai/client/models/{deletemodelop.py => delete_model_v1_models_model_id_deleteop.py} (76%)
 rename src/mistralai/client/models/{deletefileout.py => deletefileresponse.py} (82%)
 rename src/mistralai/client/models/{documentout.py => document.py} (60%)
 rename src/mistralai/client/models/{eventout.py => event.py} (56%)
 rename src/mistralai/client/models/{downloadfileop.py => files_api_routes_delete_fileop.py} (74%)
 rename src/mistralai/client/models/{deletefileop.py => files_api_routes_download_fileop.py} (73%)
 rename src/mistralai/client/models/{getfilesignedurlop.py => files_api_routes_get_signed_urlop.py} (51%)
 rename src/mistralai/client/models/{listfilesop.py => files_api_routes_list_filesop.py} (70%)
 rename src/mistralai/client/models/{retrievefileop.py => files_api_routes_retrieve_fileop.py} (73%)
 rename src/mistralai/client/models/{uploadfileop.py => files_api_routes_upload_fileop.py} (70%)
 create mode 100644 src/mistralai/client/models/finetunedmodelcapabilities.py
 delete mode 100644 src/mistralai/client/models/ftmodelcapabilitiesout.py
 delete mode 100644 src/mistralai/client/models/getagentop.py
 delete mode 100644 src/mistralai/client/models/getdocumenttextcontentop.py
 rename src/mistralai/client/models/{retrievefileout.py => getfileresponse.py} (69%)
 delete mode 100644 src/mistralai/client/models/getfinetuningjobop.py
 rename src/mistralai/client/models/{filesignedurl.py => getsignedurlresponse.py} (65%)
 rename src/mistralai/client/models/{githubrepositoryout.py => githubrepository.py} (59%)
 create mode 100644 src/mistralai/client/models/imagedetail.py
 rename src/mistralai/client/models/{jobmetadataout.py => jobmetadata.py} (52%)
 rename src/mistralai/client/models/{cancelbatchjobop.py => jobs_api_routes_batch_cancel_batch_jobop.py} (72%)
 rename src/mistralai/client/models/{getbatchjobop.py => jobs_api_routes_batch_get_batch_jobop.py} (56%)
 rename src/mistralai/client/models/{listbatchjobsop.py => jobs_api_routes_batch_get_batch_jobsop.py} (71%)
 rename src/mistralai/client/models/{archivemodelop.py => jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py} (73%)
 create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py
 create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py
 create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py
 rename src/mistralai/client/models/{listfinetuningjobsop.py => jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py} (75%)
 create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py
 rename src/mistralai/client/models/{unarchivemodelop.py => jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py} (73%)
 create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py
 delete mode 100644 src/mistralai/client/models/jobsout.py
 rename src/mistralai/client/models/{legacyjobmetadataout.py => legacyjobmetadata.py} (70%)
 rename src/mistralai/client/models/{deletelibraryop.py => libraries_delete_v1op.py} (76%)
 rename src/mistralai/client/models/{deletedocumentop.py => libraries_documents_delete_v1op.py} (79%)
 create mode 100644 src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py
 create mode 100644 src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py
 rename src/mistralai/client/models/{getdocumentop.py => libraries_documents_get_status_v1op.py} (78%)
 rename src/mistralai/client/models/{getdocumentextractedtextsignedurlop.py => libraries_documents_get_text_content_v1op.py} (77%)
 rename src/mistralai/client/models/{getdocumentstatusop.py => libraries_documents_get_v1op.py} (80%)
 rename src/mistralai/client/models/{listdocumentsop.py => libraries_documents_list_v1op.py} (67%)
 rename src/mistralai/client/models/{getdocumentsignedurlop.py => libraries_documents_reprocess_v1op.py} (78%)
 rename src/mistralai/client/models/{updatedocumentop.py => libraries_documents_update_v1op.py} (64%)
 rename src/mistralai/client/models/{uploaddocumentop.py => libraries_documents_upload_v1op.py} (91%)
 rename src/mistralai/client/models/{getlibraryop.py => libraries_get_v1op.py} (77%)
 rename src/mistralai/client/models/{updateorcreatelibraryaccessop.py => libraries_share_create_v1op.py} (81%)
 rename src/mistralai/client/models/{deletelibraryaccessop.py => libraries_share_delete_v1op.py} (83%)
 rename src/mistralai/client/models/{listlibraryaccessesop.py => libraries_share_list_v1op.py} (75%)
 rename src/mistralai/client/models/{updatelibraryop.py => libraries_update_v1op.py} (60%)
 rename src/mistralai/client/models/{libraryout.py => library.py} (58%)
 delete mode 100644 src/mistralai/client/models/libraryinupdate.py
 create mode 100644 src/mistralai/client/models/listbatchjobsresponse.py
 rename src/mistralai/client/models/{listdocumentout.py => listdocumentsresponse.py} (60%)
 rename src/mistralai/client/models/{listfilesout.py => listfilesresponse.py} (53%)
 create mode 100644 src/mistralai/client/models/listfinetuningjobsresponse.py
 create mode 100644 src/mistralai/client/models/listlibrariesresponse.py
 delete mode 100644 src/mistralai/client/models/listlibraryout.py
 rename src/mistralai/client/models/{metricout.py => metric.py} (60%)
 create mode 100644 src/mistralai/client/models/realtimetranscriptioninputaudioappend.py
 create mode 100644 src/mistralai/client/models/realtimetranscriptioninputaudioend.py
 create mode 100644 src/mistralai/client/models/realtimetranscriptioninputaudioflush.py
 create mode 100644 src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py
 create mode 100644 src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py
 delete mode 100644 src/mistralai/client/models/reprocessdocumentop.py
 create mode 100644 src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py
 delete mode 100644 src/mistralai/client/models/retrievemodelop.py
 delete mode 100644 src/mistralai/client/models/startfinetuningjobop.py
 create mode 100644 src/mistralai/client/models/toolcallconfirmation.py
 create mode 100644 src/mistralai/client/models/toolconfiguration.py
 delete mode 100644 src/mistralai/client/models/unarchiveftmodelout.py
 create mode 100644 src/mistralai/client/models/unarchivemodelresponse.py
 rename src/mistralai/client/models/{agentupdaterequest.py => updateagentrequest.py} (66%)
 rename src/mistralai/client/models/{documentupdatein.py => updatedocumentrequest.py} (60%)
 delete mode 100644 src/mistralai/client/models/updateftmodelin.py
 create mode 100644 src/mistralai/client/models/updatelibraryrequest.py
 delete mode 100644 src/mistralai/client/models/updatemodelop.py
 create mode 100644 src/mistralai/client/models/updatemodelrequest.py
 rename src/mistralai/client/models/{wandbintegrationout.py => wandbintegrationresult.py} (65%)
 create mode 100644 src/mistralai/client/utils/dynamic_imports.py
 create mode 100644 src/mistralai/client/utils/unions.py
diff --git
a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6e86c59c..678c20f2 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,26 +1,26 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 2d9e4f612e5caf84349ab02663eee66e + docChecksum: b66b034aac7aa9b38c4fb47a3b3d843e docVersion: 1.0.0 - speakeasyVersion: 1.685.0 - generationVersion: 2.794.1 - releaseVersion: 2.0.0a3 - configChecksum: 7fc1ba01c21def8447b979e71593af4a + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0-a3.1 + configChecksum: 134292298710eaf25a0f90f7097e648f repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 3aa9018f-cb6c-4c1b-96d0-b832fd5f6513 - pristine_commit_hash: 5c4e3b65b7572c91338d50dc3ca91ea6a46eedf7 - pristine_tree_hash: aaea604044e12872107c3b550ea7be094fb66a99 + generation_id: 21ec746f-e476-468a-bb8e-c942c0997501 + pristine_commit_hash: 99ae95385eb06175841ba19bef78319a5921c585 + pristine_tree_hash: 5b06b6f5add0cd16af8139d524a42368532441c6 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 configurableModuleName: 0.2.0 - constsAndDefaults: 1.0.5 - core: 5.23.18 + constsAndDefaults: 1.0.7 + core: 6.0.12 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -29,23 +29,23 @@ features: examples: 3.0.2 flatRequests: 1.0.1 flattening: 3.1.1 - globalSecurity: 3.0.4 + globalSecurity: 3.0.5 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.2.0 includes: 3.0.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 - nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.2 - responseFormat: 1.0.1 - retries: 3.0.3 - sdkHooks: 1.2.0 - serverEvents: 1.0.11 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.1.1 + unions: 3.1.4 uploadStreams: 1.0.0 trackedFiles: .gitattributes: @@ -58,64 +58,128 @@ trackedFiles: pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae USAGE.md: id: 3aed33ce6e6f - last_write_checksum: sha1:b1cf4cc87111df10c55731b3f5abad22890387a2 - pristine_git_object: 1810386448a440cfc5f7b8579695b228ae40460d + last_write_checksum: sha1:50cc0351d6145a805d1d5ae8be4dfce58178e648 + pristine_git_object: f71bbabc223b8cef8d923816fce8d572f3901884 + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/agent.md: id: ffdbb4c53c87 - last_write_checksum: sha1:4538aaa78a09b7e33db405f84916b1eb82f94bca - pristine_git_object: e335d889cdb70f4d3c987827ff714db90418cb39 + last_write_checksum: sha1:c87b05a17785cd83fdfc58cb2d55b6d77d3bc23e + pristine_git_object: 4de5a901d120b85ba5940490a2ec3fd4f1a91136 docs/models/agentaliasresponse.md: id: 5ac4721d8947 last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167 pristine_git_object: aa531ec5d1464f95e3938f148c1e88efc30fa6a6 docs/models/agentconversation.md: id: 3590c1a566fa - last_write_checksum: sha1:264d78815c3999bac377ab3f8c08a264178baf43 - pristine_git_object: a2d617316f1965acfabf7d2fe74334de16213829 + last_write_checksum: sha1:43e7c1ed2b43aca2794d89f2e6d6aa5f1478cc3e + pristine_git_object: 
451f6fb8f700dddd54c69593c316bf562b5cbc93 docs/models/agentconversationagentversion.md: id: 468e0d1614bb last_write_checksum: sha1:6e60bf4a18d791d694e90c89bdb8cc38e43c324b pristine_git_object: 668a8dc0f0c51a231a73aed51b2db13de243a038 - docs/models/agentconversationobject.md: - id: cfd35d9dd4f2 - last_write_checksum: sha1:112552d4a241967cf0a7dcb981428e7e0715dc34 - pristine_git_object: ea7cc75c5197ed42f9fb508a969baa16effe1f98 - docs/models/agentcreationrequest.md: - id: 697a770fe5c0 - last_write_checksum: sha1:b3f12ca0a6356e657de2941c8441fc951bcc96f4 - pristine_git_object: f0f0fdbc13f8f490ded4f8df3944250aece1311b - docs/models/agentcreationrequesttool.md: - id: 392d970ffb74 - last_write_checksum: sha1:310d4b107554a9c16143191fdc306a5438b63768 - pristine_git_object: b3bd7fa3cead0a0a1480b0e1b3f0afbfd177b600 docs/models/agenthandoffdoneevent.md: id: dcf166a3c3b0 last_write_checksum: sha1:9e95c09f724827f5e9c202fd634bdfa2baef1b6e pristine_git_object: 6bfcc3d83457edf05d0f13957d34ead0f260599b docs/models/agenthandoffentry.md: id: 39d54f489b84 - last_write_checksum: sha1:7d949e750fd24dea20cabae340f9204d8f756008 - pristine_git_object: 8831b0ebad1c4e857f4f4353d1815753bb13125f - docs/models/agenthandoffentryobject.md: - id: ac62dd5f1002 - last_write_checksum: sha1:9d25ec388406e6faa765cf163e1e6dcb590ca0e9 - pristine_git_object: 4bb876fb3c60a42cf530c932b7c60278e6036f03 - docs/models/agenthandoffentrytype.md: - id: 07506fd159e0 - last_write_checksum: sha1:27ce9bdf225fbad46230e339a5c6d96213f1df62 - pristine_git_object: 527ebceb2ff1bbba1067f30438befd5e2c2e91d6 + last_write_checksum: sha1:a93a604ced2303eb6f93cfe0f1360224d3298b37 + pristine_git_object: 2b689ec720c02b7289ec462d7acca64a82b23570 docs/models/agenthandoffstartedevent.md: id: b620102af460 last_write_checksum: sha1:33732e0465423348c2ace458506a597a3dadf9b2 pristine_git_object: 518b5a0c4521ec55a5a28ba3ef0ad1c1fce52792 - docs/models/agentobject.md: - id: ed24a6d647a0 - last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d - pristine_git_object: 70e143b030d3041c7538ecdacb8f5f9f8d1b5c92 + docs/models/agentsapiv1agentscreateorupdatealiasrequest.md: + id: c09ec9946094 + last_write_checksum: sha1:0883217b4bad21f5d4f8162ca72005bf9105a93f + pristine_git_object: 79406434cc6ff3d1485089f35639d6284f66d6cb + docs/models/agentsapiv1agentsdeletealiasrequest.md: + id: 429307ab315d + last_write_checksum: sha1:8e0a8388bb51c234aa1eb5566cb68389ebe57574 + pristine_git_object: 8e95c0c31e8ac92b374c153d622d7806b9e59a8d + docs/models/agentsapiv1agentsdeleterequest.md: + id: 0faaaa59add9 + last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 + pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c + docs/models/agentsapiv1agentsgetagentversion.md: + id: 3316961b40c4 + last_write_checksum: sha1:e4f4c6a64b1c2ec9465b7ad008df4d7859098e59 + pristine_git_object: 7fb9f2d578c4901ca1b41aaada6acc3a5ee94fa1 + docs/models/agentsapiv1agentsgetrequest.md: + id: 01740ae62cff + last_write_checksum: sha1:bc86e90289ec09b40212083a82455b4fe71c7194 + pristine_git_object: ceffe0096ffd6db97a6018d34870c29cec4fb0d3 + docs/models/agentsapiv1agentsgetversionrequest.md: + id: 88ed22b85cde + last_write_checksum: sha1:0ef23807c8efa2662144da66745045abdd2cb60a + pristine_git_object: 96a7358943a69e871a2bb7f0f30d6fe2bb8dff3d + docs/models/agentsapiv1agentslistrequest.md: + id: c2720c209527 + last_write_checksum: sha1:99502da34d868f1563ad1e3ea256f3becdbefa11 + pristine_git_object: 4785a54c561f5f9e1eb7ffd3317c5faa9b8b56dd + 
docs/models/agentsapiv1agentslistversionaliasesrequest.md: + id: 69c8bce2c017 + last_write_checksum: sha1:4083fc80627b2cc04fd271df21393944730ef1ba + pristine_git_object: 3083bf92641404738948cd57306eac978b701551 + docs/models/agentsapiv1agentslistversionsrequest.md: + id: 0bc44ed8d6bb + last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18 + pristine_git_object: 91831700bed92cb4f609f8c412dcb0ee98b544ca + docs/models/agentsapiv1agentsupdaterequest.md: + id: 7692812cd677 + last_write_checksum: sha1:aaccaa13eeb0d775b0c6a0b23c328d3f3c2c2dbf + pristine_git_object: 7ef60becfcdde09c8ce0366361306c5661d67e24 + docs/models/agentsapiv1agentsupdateversionrequest.md: + id: a001251b1624 + last_write_checksum: sha1:0ee9e0fc55fd969f2b8f2c55dec93bf10e0e5b2f + pristine_git_object: e937acc9b1d3f50eee69495b1305f7aee1c960ac + docs/models/agentsapiv1conversationsappendrequest.md: + id: 70f76380e810 + last_write_checksum: sha1:d428dc114b60362d269b5ae50a57ea60b9edee1a + pristine_git_object: ac8a00ecab30305de8eb8c7c08cda1b1c04148c3 + docs/models/agentsapiv1conversationsappendstreamrequest.md: + id: f6ada9a592c5 + last_write_checksum: sha1:8a806ca2e5bad75d9d0cf50726dc0d5b8e7e3eab + pristine_git_object: dbc330f11aa3039c9cea2dd7d477d56d5c4969d0 + docs/models/agentsapiv1conversationsdeleterequest.md: + id: c2c9f084ed93 + last_write_checksum: sha1:9ecca93f8123cebdd1f9e74cf0f4a104b46402a8 + pristine_git_object: c6eed281331cb4d2cac4470de5e04935d22eca5a + docs/models/agentsapiv1conversationsgetrequest.md: + id: d6acce23f92c + last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf + pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e + docs/models/agentsapiv1conversationshistoryrequest.md: + id: e3efc36ea8b5 + last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be + pristine_git_object: 7e5d39e9a11ac437a24b8c059db56527fa93f8b0 + docs/models/agentsapiv1conversationslistrequest.md: + id: 406c3e92777a + last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194 + pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4 + docs/models/agentsapiv1conversationslistresponse.md: + id: 394c37d2203f + last_write_checksum: sha1:1144f41f8a97daacfb75c11fdf3575e553cf0859 + pristine_git_object: b233ee203ff5da0c65d6e9f87b2925d6802d2c0a + docs/models/agentsapiv1conversationsmessagesrequest.md: + id: 2c749c6620d4 + last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2 + pristine_git_object: a91ab0466d57379eacea9d475c72db9cb228a649 + docs/models/agentsapiv1conversationsrestartrequest.md: + id: 6955883f9a44 + last_write_checksum: sha1:99c1455c7fde9b82b6940e6e1ed4f363d7c38de9 + pristine_git_object: a18a41f5395adae3942573792c86ddf7c3812ff4 + docs/models/agentsapiv1conversationsrestartstreamrequest.md: + id: 0c39856fd70e + last_write_checksum: sha1:d03475c088c059077049270c69be01c67a17f178 + pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d docs/models/agentscompletionrequest.md: id: 906b82c214dc - last_write_checksum: sha1:84ee0378e413830260a279a67fc3b1342e643328 - pristine_git_object: d87dc7da67dd883f92a23d8df4f5648e97c4f12e + last_write_checksum: sha1:b5685a779b633823ccfe99d9740078e0aab50bde + pristine_git_object: 33435732b94c81c7bccff5cf1868b2f382223200 docs/models/agentscompletionrequestmessage.md: id: 5337f0644b40 last_write_checksum: sha1:ecf7b7cdf0d24a5e97b520366cf816b8731734bb @@ -130,8 +194,8 @@ trackedFiles: pristine_git_object: 63b9dca9fbb8d829f93d8327a77fbc385a846c76 docs/models/agentscompletionstreamrequest.md: id: 21d09756447b - last_write_checksum: 
sha1:0c88bc63255733480b65b61685dcc356fcc9ed66 - pristine_git_object: dd1804a1b3a2aadc3e3c3964262b0fc25195703f + last_write_checksum: sha1:9d506ac8f620f4cef54b4b7a1891fb17b8eaefa5 + pristine_git_object: 407be8e0c1264a31cc0d80c1059f3bd62c2eaceb docs/models/agentscompletionstreamrequestmessage.md: id: b309ade92081 last_write_checksum: sha1:98744c9646969250242cbbfbdf428dbd7030e4bb @@ -148,58 +212,34 @@ trackedFiles: id: 513b8b7bc0b7 last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7 pristine_git_object: 022f7e10edb22cb1b1d741c13ac586bd136d03b5 - docs/models/agentupdaterequest.md: - id: 75a7f820b906 - last_write_checksum: sha1:358e39130bc439f5801a2dcc73502a1f1c2c6685 - pristine_git_object: b1830d7be6cb8e33529246a3368deaf0909a3343 - docs/models/agentupdaterequesttool.md: - id: 9c9aac9dda3d - last_write_checksum: sha1:25d8a331a706bf8e6056b99f8ff1a46abff6ae72 - pristine_git_object: ce5531260e9b06db0b93d4bfcf95a12b627da522 docs/models/apiendpoint.md: id: be613fd9b947 last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 pristine_git_object: 8d83a26f19241da5ce626ff9526575c50e5d27be - docs/models/appendconversationrequest.md: - id: 295b6d446690 - last_write_checksum: sha1:0c3d7091b19abf30fb0b78800cab292abd902c1d - pristine_git_object: 977d8e8b797c8ae36de4da90bc32bba47a6a0779 - docs/models/appendconversationstreamrequest.md: - id: aeea33736f95 - last_write_checksum: sha1:a0b5b036e46688e862c7f7671c86f965b5322742 - pristine_git_object: a23231c2c2f0017ba29c8863c3046aebe8f57ff1 - docs/models/archiveftmodelout.md: - id: 9e855deac0d1 - last_write_checksum: sha1:41866e666241ed42e5e7c6df5a64b887f1ff774b - pristine_git_object: 98fa7b19e4579198b433eccc76b2b4d990476b72 - docs/models/archivemodelrequest.md: - id: 3fde72a45ad9 - last_write_checksum: sha1:60eaa9be631215c63a2c01da7da809ec34f5b01a - pristine_git_object: 806d135e2bc6c0da2b20a4bb84107d3ab31962ad + docs/models/archivemodelresponse.md: + id: 133f4af8058f + last_write_checksum: sha1:95fa73ebd765cbd244c847218df6d31e18dc5e85 + pristine_git_object: 276656d1d00ca174e78aa9102f7f576575daa818 docs/models/arguments.md: id: 7ea5e33709a7 last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 docs/models/assistantmessage.md: id: 7e0218023943 - last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 - pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d docs/models/assistantmessagecontent.md: id: 9f1795bbe642 last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d - docs/models/assistantmessagerole.md: - id: bb5d2a4bc72f - last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 - pristine_git_object: 658229e77eb6419391cf7941568164541c528387 docs/models/attributes.md: id: ececf40457de last_write_checksum: sha1:9f23adf16a682cc43346d157f7e971c596b416ef pristine_git_object: 147708d9238e40e1cdb222beee15fbe8c1603050 docs/models/audiochunk.md: id: 88315a758fd4 - last_write_checksum: sha1:d52e493765280fc0b1df61a0ce1086205965c712 - pristine_git_object: 8a04af045f4ce33a2964f5f75664e82c3edf1bf3 + last_write_checksum: sha1:b47b295122cea28d66212d75a1f0eccd70a248cc + pristine_git_object: 1ba8b0f578fa94b4f8dddf559798e033a1704e7b docs/models/audioencoding.md: id: 1e0dfee9c2a0 last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478 @@ 
-210,8 +250,8 @@ trackedFiles: pristine_git_object: d174ab9959cadde659f76db94ed87c743e0f6783 docs/models/audiotranscriptionrequest.md: id: ebf59641bc84 - last_write_checksum: sha1:c55c97a06726812323a031897beffbb160021c05 - pristine_git_object: d7f5bd51b1289f0eb481d86a71bb483ee50bbc40 + last_write_checksum: sha1:a478d0656a0f69d4c426e548e2236b99730e2084 + pristine_git_object: 80bd53015ddee1bcecc7aeecc75152a19afc22c1 docs/models/audiotranscriptionrequeststream.md: id: 79b5f721b753 last_write_checksum: sha1:df6825c05b5a02dcf904ebaa40fb97e9186248cc @@ -224,18 +264,10 @@ trackedFiles: id: 8053e29a3f26 last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f pristine_git_object: 95016cdc4c6225d23edc4436e11e4a7feacf1fe6 - docs/models/batchjobin.md: - id: 10f37fc761f1 - last_write_checksum: sha1:0acea471920959b7c85a015e557216c783de4e88 - pristine_git_object: 7dcf265dfe63cbbd13b7fa0e56fc62717f3ee050 - docs/models/batchjobout.md: - id: 49a98e5b2aba - last_write_checksum: sha1:b504fcf5a65567ec114fdc5b79cabe7554b36cac - pristine_git_object: 5f1011734b249a75cf9381d024f295fe31ff9f68 - docs/models/batchjobsout.md: - id: d8041dee5b90 - last_write_checksum: sha1:5e4127548b50abbb6cee267ac53a8e05f55b97f9 - pristine_git_object: 7a9d6f688e87851ed7ffa516523e12cb3f967c68 + docs/models/batchjob.md: + id: de2a00d0f739 + last_write_checksum: sha1:1160822c4032e1745dfaf37abcac02e78cbc4fb4 + pristine_git_object: 162e2cff3a1132f2b89e57dcf1bf8b4c403b6453 docs/models/batchjobstatus.md: id: 7e6f034d3c91 last_write_checksum: sha1:9e876b4b94255e1399bbb31feb51e08691bcb8fc @@ -248,22 +280,10 @@ trackedFiles: id: 9d14e972f08a last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9 - docs/models/cancelbatchjobrequest.md: - id: db6860fe9ec3 - last_write_checksum: sha1:d2f55d5ffec21f6f70cc77c643c73113b0d1ed43 - pristine_git_object: f31f843bb864fc21ed620e4e069b8a97a091d99c - docs/models/cancelfinetuningjobrequest.md: - id: 10d341c56c9c - last_write_checksum: sha1:a484ad9d8eb791d60e5447b845b73871e9f1e6a3 - pristine_git_object: 6525788cd527eca4d89f95d4c829c1b3eda0f06e - docs/models/cancelfinetuningjobresponse.md: - id: 0c9ca281a898 - last_write_checksum: sha1:ac02c2a268a21430e74f8075671de0b97fd844e6 - pristine_git_object: c512342e575e9b6d57da08b20f50c86510d246d8 docs/models/chatclassificationrequest.md: id: 57b86771c870 - last_write_checksum: sha1:2ee5fff26c780ade7ed89617358befa93a6dfd23 - pristine_git_object: 910d62ae20fc67e9a3200397aeab95513bfed90f + last_write_checksum: sha1:bfd2fb8e2c83578ca0cea5209ea3f18c3bcd2ae5 + pristine_git_object: ba9c95eab2c1e4f080e39e8804a5de222e052ee6 docs/models/chatcompletionchoice.md: id: 0d15c59ab501 last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 @@ -274,8 +294,8 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:f6eec11c908ee6581e508fff98e785441c4b84ad - pristine_git_object: f3abeeff4346c181cfca40eb819a8c6ecf656026 + last_write_checksum: sha1:4980b698006c641b1c84495c5b601cc8662b05f6 + pristine_git_object: 921161faf38b2f4d4648d6d744c08a96ed38f0a6 docs/models/chatcompletionrequestmessage.md: id: 3f5e170d418c last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 @@ -294,8 +314,8 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: 
sha1:7ed921e0366c1b00225c05e60937fb8d228f027b - pristine_git_object: 42792d396462dead9d7a80a87f05a0888efe348b + last_write_checksum: sha1:c54d4a32d0d65533b79c381174690e9b735b2800 + pristine_git_object: 8761f000d4249de86265bc63da785cd807c2e7a5 docs/models/chatcompletionstreamrequestmessage.md: id: 053a98476cd2 last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 @@ -324,10 +344,10 @@ trackedFiles: id: aec173bca43b last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7 pristine_git_object: ff1c6ea32233d5c5e8d6292c62f9e8eacd3340c3 - docs/models/checkpointout.md: - id: 909ce66e1f65 - last_write_checksum: sha1:89e678d55b97353ad1c3b28d9f1ab101f6be0928 - pristine_git_object: 053592d2c57c43220bec3df27cc1486554178955 + docs/models/checkpoint.md: + id: 9c97119961cf + last_write_checksum: sha1:0e7732d9c30f67d59fe4d9ad1d165ad0cd80c790 + pristine_git_object: f7f35530c0d57aca02b2503e968a9a262bb1a10d docs/models/classificationrequest.md: id: 6f79e905a3fa last_write_checksum: sha1:3e083210e1cfdd3539e714928688648673767ae8 @@ -344,58 +364,54 @@ trackedFiles: id: 97a5eab5eb54 last_write_checksum: sha1:41269d1372be3523f46cb57bd19292af4971f7c0 pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1 - docs/models/classifierdetailedjobout.md: - id: a2084ba5cc8c - last_write_checksum: sha1:ee206a5c68bd7aed201f8274d0710e8c570a35d2 - pristine_git_object: fb532449458fb445bb79d3fa0ed8e6faa538f00a - docs/models/classifierdetailedjoboutintegration.md: - id: 7a775cbd4d9f - last_write_checksum: sha1:6b2691766c1795d17b1572076a693eb377c5307f - pristine_git_object: 9dfa6e8a179529bd12fb8935c264e3c57c62cb41 - docs/models/classifierdetailedjoboutstatus.md: - id: a98493f9d02d - last_write_checksum: sha1:3441d9961e9093d314dd1bc88df1743cd12866d2 - pristine_git_object: c3118aafa8614f20c9adf331033e7822b6391752 - docs/models/classifierftmodelout.md: - id: 268ac482c38b - last_write_checksum: sha1:46bdbe1176bbf43dd79a4ff8255129fd82bd97bc - pristine_git_object: 6e7afbbed075efe2e29f42b7bc3d758fe47460d4 - docs/models/classifierjobout.md: - id: 2e3498af3f8c - last_write_checksum: sha1:70845cc24cd48987552ca337ea5522066e6de1b9 - pristine_git_object: ceecef5decdbd74a9741401ad0f1a9e8e215ae82 - docs/models/classifierjoboutintegration.md: - id: 30a340fed57d - last_write_checksum: sha1:72dfda442a88f977f3480c95127534a600362806 - pristine_git_object: 33af8a708618c1e54c7f55e67c8848fe45217799 - docs/models/classifierjoboutstatus.md: - id: 2411c6bf3297 - last_write_checksum: sha1:6ceef218b783505231a0ec653292460e6cb1a65b - pristine_git_object: 4520f1648323227863f78f7f86b2b4567bb7ace7 - docs/models/classifiertargetin.md: - id: 90d2da204677 - last_write_checksum: sha1:18fca3deee476b3dd23d55a9a40ced96cdc21f83 - pristine_git_object: 78cab67b4ced9fd0139a1dc4e6b687de870f9c62 - docs/models/classifiertargetout.md: - id: 1ce5c0513022 - last_write_checksum: sha1:2b8ed8a25b6ea6f2717cb4edcfa3f6a1ff3e69e4 - pristine_git_object: 57535ae5cb7d30177d1800d3597fe2f6ec3ad024 + docs/models/classifierfinetunedmodel.md: + id: b67a370e0ef1 + last_write_checksum: sha1:5fe3c26e337083716dd823e861924a03c55ce293 + pristine_git_object: ad05f93147d6904ee62602480c24644ec5e4cf63 + docs/models/classifierfinetuningjob.md: + id: 5bf35c25183f + last_write_checksum: sha1:afedddfe38e217189b5ec12ded74606c3b1e4c59 + pristine_git_object: 369756ba16a4c64f03cb6bb5da9bc0abd2a8eac6 + docs/models/classifierfinetuningjobdetails.md: + id: c91d53e010d5 + last_write_checksum: sha1:59a4c11a0d52b02ffc48e011a40fb4ebb1604825 + pristine_git_object: 
c5efdf1c817b978506a4862991a0f8eab8b219fb + docs/models/classifierfinetuningjobdetailsintegration.md: + id: e6c161ac2a44 + last_write_checksum: sha1:6450686e7f92ac8c1c02fcea82d5855ca6738b46 + pristine_git_object: 438a35d9eb0e4250a9e6bcbb7dafeb26d74e018a + docs/models/classifierfinetuningjobdetailsstatus.md: + id: 87737e85b845 + last_write_checksum: sha1:2ff02df3efee0f9b5867045d43fc71025fb37129 + pristine_git_object: 058c65832188f7148d96ab320114d984d618efa1 + docs/models/classifierfinetuningjobintegration.md: + id: 91de20176a8c + last_write_checksum: sha1:e49a7c082300eb4d3106e96b21ebc6860060b8c3 + pristine_git_object: 820aee4c6fcf899341d869d796b1a61d4d4eab42 + docs/models/classifierfinetuningjobstatus.md: + id: e3c4e672dc88 + last_write_checksum: sha1:1bfd306ab633d3ea73272e56796c1f63843fce22 + pristine_git_object: ca829885de056c5ccafec0fe3a901743e56deb0c + docs/models/classifiertarget.md: + id: 4c5c0b3e0bc7 + last_write_checksum: sha1:ad16823def0acb267543c4189df32406a27685aa + pristine_git_object: f8c99e2e7e6653d0e809506861ec4c25571cb5c9 + docs/models/classifiertargetresult.md: + id: c78d27aec276 + last_write_checksum: sha1:17c37c10385019953d6085fff6681808f950693f + pristine_git_object: ccadc623493bfa946dc2cccf894364b1e6b8b452 docs/models/classifiertrainingparameters.md: id: 9370e1ccd3d5 last_write_checksum: sha1:03f7c32717792966afdec50cb9dc1c85bb99dd84 pristine_git_object: 3b6f3be6942bbcf56261f773864a518d16923880 - docs/models/classifiertrainingparametersin.md: - id: 8bcca130af93 - last_write_checksum: sha1:7e9d61d3377031c740ea98d6c3dc65be99dc059b - pristine_git_object: 1287c973fae9762310597fbeceaef26865ace04f docs/models/codeinterpretertool.md: id: f009740c6e54 - last_write_checksum: sha1:bce278ce22703246613254ee2dac57f8b14e8060 - pristine_git_object: 544cda9358faf6ec525d06f78068817aee55b193 + last_write_checksum: sha1:a2114d61a98a48b4365a77c0c75c06ca834367ad + pristine_git_object: 6302fc627d7c49442b6c9aec19c70fdceaf7c519 docs/models/completionargs.md: id: 3b54534f9830 - last_write_checksum: sha1:c0368b7c21524228939b2093ff1a4524eb57aeb7 - pristine_git_object: 60d091374a80418892df9700dc0c21e7dad28775 + last_write_checksum: sha1:7432daccf23d8963a65fa4f2b103ea0396fbfbeb + pristine_git_object: 148f760859636e8c32259604698785663491a445 docs/models/completionargsstop.md: id: 40b0f0c81dc8 last_write_checksum: sha1:2a576618c62d4818af0048ed3a79080149a88642 @@ -404,46 +420,46 @@ trackedFiles: id: 60cb30423c60 last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 - docs/models/completiondetailedjobout.md: - id: 634ca7241abd - last_write_checksum: sha1:7899568eedfa04cccb5b07c2e0d1e821af8fb0a2 - pristine_git_object: bc7e5d1cb5c298d0d935a9e3472ad547b5b9714c - docs/models/completiondetailedjoboutintegration.md: - id: f8d1f509f456 - last_write_checksum: sha1:3317db3f71962623a6144e3de0db20b4abfd5b9b - pristine_git_object: 9e526053160cc393dc65242cff8f8143bc67e38c - docs/models/completiondetailedjoboutrepository.md: - id: a8e7452065a7 - last_write_checksum: sha1:b1910efc6cd1e50391cd33daef004441bac3d3cd - pristine_git_object: 92a7b75c51f27e73ca41d5ffee28921057959878 - docs/models/completiondetailedjoboutstatus.md: - id: c606d38452e2 - last_write_checksum: sha1:1e9a5736de32a44cf539f7eaf8214aad72ec4994 - pristine_git_object: b80525bad8f6292892d8aee864a549c8ec52171c docs/models/completionevent.md: id: e57cd17cb9dc last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 pristine_git_object: 
7a66e8fee2bb0f1c58166177653893bb05b98f1d - docs/models/completionftmodelout.md: - id: 93fed66a5794 - last_write_checksum: sha1:ee4bccae36229f23b1db8894585cc8e88ad71f6d - pristine_git_object: ccd4844fab92d000de1cc9ba59c884e31dc5db26 - docs/models/completionjobout.md: - id: 77315b024171 - last_write_checksum: sha1:a08ca1dcedbb9b88b9909a4b03251e2fb0cd8319 - pristine_git_object: 5eb44eef73872b0f1c2709381fc0852e3b3e224b - docs/models/completionjoboutintegration.md: - id: 25e651dd8d58 - last_write_checksum: sha1:59711a3fa46d6a4bff787a61c81ecc34bdaaec2e - pristine_git_object: 6474747bf8d38485f13b1702e3245ef9e0f866a9 - docs/models/completionjoboutrepository.md: - id: 2c94b3ecacf1 - last_write_checksum: sha1:2cb5b23640eeaf87f45dc9f180247ed7a6307df7 - pristine_git_object: 52f65558f8b3663596642d8854df36d29858beae - docs/models/completionjoboutstatus.md: - id: b77ebfd0e4f0 - last_write_checksum: sha1:b8f33134c63b12dc474e7714b1ac19d768a3cbbd - pristine_git_object: 917549450a096397d9a7ca0b8f5856f7cd62db04 + docs/models/completionfinetunedmodel.md: + id: 23a7705a9c89 + last_write_checksum: sha1:50d173b7505a97435c9d7ccb4fa99af04a51c6a2 + pristine_git_object: 0055db021f1c039c84cf7cfecd654683d2f9996f + docs/models/completionfinetuningjob.md: + id: 13c69dd18690 + last_write_checksum: sha1:b77e82f00f851034999986ff67aea5b0b558fbd2 + pristine_git_object: 83c0ae7e551e1f70df8dad4dce75ad20fe2b7ae7 + docs/models/completionfinetuningjobdetails.md: + id: b285f80afd59 + last_write_checksum: sha1:6ced5483d8249d7e8f502ec3f53f45d76e348003 + pristine_git_object: 3c54e874bcd968a9d5d9c8b3285632ba71364763 + docs/models/completionfinetuningjobdetailsintegration.md: + id: 27662795c95f + last_write_checksum: sha1:655f03341ad1b590ec451288607cec61024bfefc + pristine_git_object: 38f6a34963db4a653ec7aa7f0c85b68e837ebafc + docs/models/completionfinetuningjobdetailsrepository.md: + id: 023920eecc9e + last_write_checksum: sha1:2b8ba6ff115fda4cc6ed74825fb09b9500d915f6 + pristine_git_object: c6bd67cde1d1628aa3efc4a53fa8487a009aa129 + docs/models/completionfinetuningjobdetailsstatus.md: + id: b1b717a4e256 + last_write_checksum: sha1:97c8699f0979978ea4320da3388e18da6219cb87 + pristine_git_object: 94d795a9ba4ec743f274d4ab5666e8897d174c61 + docs/models/completionfinetuningjobintegration.md: + id: 392ffc2cdef2 + last_write_checksum: sha1:53540da44e0edbad5d4085f81ded159dbc582a6c + pristine_git_object: dbe57417d78f1de798c6eaea7e56984e3b002cb9 + docs/models/completionfinetuningjobrepository.md: + id: deb47b72e8e4 + last_write_checksum: sha1:c0fd43a01c2f763c7945311741ee3c2b9c7520f6 + pristine_git_object: 54225e27204b703a6b33d2d66492e272559c3b3c + docs/models/completionfinetuningjobstatus.md: + id: 2ac420312815 + last_write_checksum: sha1:90f498cb04e89e8f4a424762c07231fd9030b326 + pristine_git_object: db151a1bd871a2bf231424a78c8c450b2a488099 docs/models/completionresponsestreamchoice.md: id: d56824d615a6 last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 @@ -456,26 +472,26 @@ trackedFiles: id: b716b0195d39 last_write_checksum: sha1:1d8d7c469f933ea741ec15c8b9ef8b986e0ca95e pristine_git_object: 4746a95df18c78331f572425a16b2b3dcbc2df4c - docs/models/completiontrainingparametersin.md: - id: 7223a57004ab - last_write_checksum: sha1:8f77e5fe2ce149115b0bda372c57fafa931abd90 - pristine_git_object: 9fcc714e5f000e6134f7f03f1dd4f56956323385 + docs/models/confirmation.md: + id: 19b9e48a3c2e + last_write_checksum: sha1:eb6494cb19f23c6df62afb009cc88ce38d24af86 + pristine_git_object: fd6e6aaa58cabba0cdec1b76ac50fb6e46f91b07 
docs/models/contentchunk.md: id: d2d3a32080cd last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c pristine_git_object: cb7e51d3a6e05f197fceff4a4999594f3e340dac docs/models/conversationappendrequest.md: id: 722746e5065c - last_write_checksum: sha1:1677ab5b06748a7650464c0d7596e66e6759ede2 - pristine_git_object: 1cdb584b62423072f9a7cdc61f045b0d161525df + last_write_checksum: sha1:c8a4a49f0a1fe5cdd2ef6264ef9c600cfc8f7beb + pristine_git_object: 78a96508e4e1c6f83de4556d0bfa3b10c875da37 docs/models/conversationappendrequesthandoffexecution.md: id: e3f56d558037 last_write_checksum: sha1:dc71c8db746bb08f6630e995cf6af9fda747e954 pristine_git_object: 7418b36a55fab959639aec456a946600eb908efb docs/models/conversationappendstreamrequest.md: id: e9f8131435e8 - last_write_checksum: sha1:559d90bbf6d64f46221edaa6482837f0ee3b0626 - pristine_git_object: a8516ea7fc7db1d6bc0abb8f99b967a1715ceb4b + last_write_checksum: sha1:3afe7eaafbf61abcd9341ee8fbca5c6d0c2db0ab + pristine_git_object: daea9c522a8a0693edce11b1bbeca1f2cba0781e docs/models/conversationappendstreamrequesthandoffexecution.md: id: 5739ea777905 last_write_checksum: sha1:c85584b63c0c5d859ee5d46d6ae167a8ee44e279 @@ -490,24 +506,16 @@ trackedFiles: pristine_git_object: 5452d7d5ce2aa59a6d89c7b7363290e91ed8a0a3 docs/models/conversationhistory.md: id: 7e97e8e6d6e9 - last_write_checksum: sha1:06df76a87aca7c5acd5a28ca3306be09a8bb541b - pristine_git_object: c8baad0b597ddb9148413a651a659b06c20351ac - docs/models/conversationhistoryobject.md: - id: 088f7df6b658 - last_write_checksum: sha1:bcce4ef55e6e556f3c10f65e860faaedc8eb0671 - pristine_git_object: a14e7f9c7a392f0d98e79cff9cc3ea54f30146fa + last_write_checksum: sha1:719a7c0722f3ad2e9f428dd31abf7bd0bad197d2 + pristine_git_object: daefe3363fb57d9a7d2737d3ea3d6e6f61021d49 docs/models/conversationinputs.md: id: 23e3160b457d last_write_checksum: sha1:0c6abaa34575ee0eb22f12606de3eab7f4b7fbaf pristine_git_object: 86db40ea1390e84c10a31155b3cde9066eac23b0 docs/models/conversationmessages.md: id: 46684ffdf874 - last_write_checksum: sha1:01ccdc4b509d5f46ff185f686d332587e25fc5b7 - pristine_git_object: c3f00979b748ad83246a3824bb9be462895eafd6 - docs/models/conversationmessagesobject.md: - id: b1833c3c20e4 - last_write_checksum: sha1:bb91a6e2c89066299660375e5e18381d0df5a7ff - pristine_git_object: db3a441bde0d086bccda4814ddfbf737539681a6 + last_write_checksum: sha1:5b10a9f3f19591a2675979c21dd8383d5249d728 + pristine_git_object: 8fa51571697ee375bfbc708de854bc0b1129eec7 docs/models/conversationrequest.md: id: dd7f4d6807f2 last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c @@ -526,16 +534,12 @@ trackedFiles: pristine_git_object: 2e4e8d01b5482c4e0644be52e55bf6912aeff69e docs/models/conversationresponse.md: id: 2eccf42d48af - last_write_checksum: sha1:17ebabdf1dd191eeac442046511c44120dfa97a1 - pristine_git_object: e31821288dd18bf425e442787f67a69ea35ff6a6 - docs/models/conversationresponseobject.md: - id: 6c028b455297 - last_write_checksum: sha1:76270a07b86b1a973b28106f2a11673d082a385b - pristine_git_object: bea66e5277feca4358dd6447959ca945eff2171a + last_write_checksum: sha1:8a86a4d0df6d13b121d5e41a8ee45555b69bf927 + pristine_git_object: 2732f785cdd706274ec5ff383f25fc201e6d0f78 docs/models/conversationrestartrequest.md: id: 558e9daa00bd - last_write_checksum: sha1:0e33f56f69313b9111b3394ecca693871d48acfa - pristine_git_object: d98653127fd522e35323b310d2342ccc08927962 + last_write_checksum: sha1:434e6c94b5d6c37b9026d536308cd1d3ff56e8d6 + pristine_git_object: ad3ff3624f533e4d4f751264d9bc6dd1849b3b69 
docs/models/conversationrestartrequestagentversion.md: id: e6ea289c6b23 last_write_checksum: sha1:a5abf95a81b7e080bd3cadf65c2db38ca458573f @@ -546,8 +550,8 @@ trackedFiles: pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e docs/models/conversationrestartstreamrequest.md: id: 01b92ab1b56d - last_write_checksum: sha1:aa3d30800417e04f741324d60529f3190ea9cd16 - pristine_git_object: a5f8cbe73ed1ce28c82d76f0e9f933bda64f733c + last_write_checksum: sha1:e9755598b5be197a938f1f74aa77ac24ccac8457 + pristine_git_object: 865a1e8f666d7f6878c40eb70fe5ab1c63da3066 docs/models/conversationrestartstreamrequestagentversion.md: id: 395265f34ff6 last_write_checksum: sha1:ebf4e89a478ab40e1f8cd3f9a000e179426bda47 @@ -572,58 +576,62 @@ trackedFiles: id: 71df6212ff44 last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc pristine_git_object: 0f75f82b38f224340bed468ceecfe622066740ba + docs/models/conversationthinkchunk.md: + id: b9a8324da8f1 + last_write_checksum: sha1:80aed188198434ceca134e7aa7351ddba82c92c9 + pristine_git_object: 1fb16bd99f2b6277f87cd40d5c1eca389819d725 + docs/models/conversationthinkchunkthinking.md: + id: 477db2d543bd + last_write_checksum: sha1:d9f8c37fe933a3e52e2adb3ffe283d79c187cd36 + pristine_git_object: 84b800188b248166aac0043994fa27d4d79aad9d docs/models/conversationusageinfo.md: id: 57ef89d3ab83 last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225 pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0 - docs/models/createfinetuningjobresponse.md: - id: a9d31306296c - last_write_checksum: sha1:a15ccee66983fcc23321f966440d02fab4463178 - pristine_git_object: f82cd793b466b0028b586781d36c690c0e5f97cd - docs/models/createorupdateagentaliasrequest.md: - id: be33079aa124 - last_write_checksum: sha1:84cb72c549ee74c44dcf00b3f6a100060e322295 - pristine_git_object: af2591ebb584965f5110ed987993f3a72b513255 - docs/models/deleteagentaliasrequest.md: - id: c116b5c42b1b - last_write_checksum: sha1:51e1544cc867389120a2d1fbb4780c855690841e - pristine_git_object: 17812ec4a03b452a2d31950cc5a9e87a8f6d79f7 - docs/models/deleteagentrequest.md: - id: 6411b6df1c85 - last_write_checksum: sha1:1157d4717b75be91744bd7464c042e367faa4b71 - pristine_git_object: 0aaacae471dd81ddc5ce4808abdd2b5653503ff6 - docs/models/deleteconversationrequest.md: - id: 7247871c454c - last_write_checksum: sha1:a43ed3e32630fbb41921fa413ab2a26a914e425e - pristine_git_object: 39d9e5dfd52d9df1d1da7093761b65e0d12a0b40 - docs/models/deletedocumentrequest.md: - id: 898eebfc019e - last_write_checksum: sha1:f06a13be4484048cf15c21d46eb2d107057b39db - pristine_git_object: eb060099f1b078fd084551338b51ee6677e8d235 - docs/models/deletefileout.md: - id: c7b84242a45c - last_write_checksum: sha1:f2b039ab88fc83ec5dd765cab8e2ed8cce7e417d - pristine_git_object: 4709cc4958d008dc24430deb597f801b91c6957f - docs/models/deletefilerequest.md: - id: ca151d3da83a - last_write_checksum: sha1:ec50f13b099a6ef28d7965f7c8721ce1f505f7d2 - pristine_git_object: bceae901954471a8667a3a61e66da6361ef50190 - docs/models/deletelibraryaccessrequest.md: - id: ca39ae894c1f - last_write_checksum: sha1:41b7cd5c2e4616d3edefeb271dd7089fa04bd67d - pristine_git_object: c7034b98c30234a0a8cb368d84d9b287690027de - docs/models/deletelibraryrequest.md: - id: 4be1af37ab41 - last_write_checksum: sha1:2769939a702c26be619f6c455cd48365b64110cc - pristine_git_object: c229ad73b2a7c39dab0ccdfa29e1f0475f0cdc7b + docs/models/createagentrequest.md: + id: 9484bab389c1 + last_write_checksum: sha1:b3228a622081b6f4b2a8bdaa60ca16049517d819 + pristine_git_object: 
cca3a079c532d3426f65a15bb0affdd34fd1d3ac + docs/models/createagentrequesttool.md: + id: 72e5f99878c5 + last_write_checksum: sha1:a90ad01c15da321f0c8ec700ba359a5371c5dcbb + pristine_git_object: c6ed3e98566eb684932fae9d2648a85c84443493 + docs/models/createbatchjobrequest.md: + id: e79afe8f495c + last_write_checksum: sha1:6cedce49f3108b9d5bc80e6d11712c594f2d9e50 + pristine_git_object: d094e2d518b31ada68c282241af3aa1483e98ff6 + docs/models/createfileresponse.md: + id: ea1396cebae8 + last_write_checksum: sha1:7b26d0a466004aca5cefaeb29f84dafc405c51ff + pristine_git_object: 8152922b0d4ce199e269df955e5a25d4acf71e28 + docs/models/createfinetuningjobrequest.md: + id: 36824ba035ff + last_write_checksum: sha1:78f019530e9f5deace91c454c91ec6c4d0d23a20 + pristine_git_object: a93e323d5dd474c6d287e1203e85b79d11d762f0 + docs/models/createfinetuningjobrequestintegration.md: + id: e41b5575b494 + last_write_checksum: sha1:06dab95269f4a571a4c62a7f956fbf0250a0e8b3 + pristine_git_object: 0054a4a683a88fe67f92c1659bcb8c792ca8d286 + docs/models/createfinetuningjobrequestrepository.md: + id: e113eb1929b5 + last_write_checksum: sha1:6bd504d3ecb219f3245a83d306c1792133b96769 + pristine_git_object: 32be1b6dc3fcf7f6ee1a1d71abee4c81493655c2 + docs/models/createlibraryrequest.md: + id: 8935b2ed9d13 + last_write_checksum: sha1:c00abfe1abb0f0323e434b084dafa0d451eb3e51 + pristine_git_object: 71562806dbec6444dcdd0a19852a31ca00b6229a + docs/models/deletefileresponse.md: + id: ab3aa44589a0 + last_write_checksum: sha1:47ebc2474e4725e9ecb0f0d5940c604d9a82a4df + pristine_git_object: 188e2504606b051674352339c6aa999116a43b61 docs/models/deletemodelout.md: id: 5643e76768d5 last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269 pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd - docs/models/deletemodelrequest.md: - id: 22c414d48ee4 - last_write_checksum: sha1:a60f549577b3461cb7552ad2080a34ad389f8579 - pristine_git_object: d80103f1610668292589b6d7b861de814c17afda + docs/models/deletemodelv1modelsmodeliddeleterequest.md: + id: c838cee0f093 + last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99 + pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4 docs/models/deltamessage.md: id: 6c5ed6b60968 last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 @@ -634,40 +642,28 @@ trackedFiles: pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/document.md: id: cd1d2a444370 - last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 - pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 + last_write_checksum: sha1:77076e66dea6f4582e73ecc5a55ef750f026448a + pristine_git_object: 284babb98fbb0279bef2626fa18eada0035572c5 docs/models/documentlibrarytool.md: id: 68083b0ef8f3 - last_write_checksum: sha1:470b969fa4983c0e7ad3d513b4b7a4fa8d5f0f41 - pristine_git_object: 1695bad40cb0a1eb269e4ee12c6a81cbf0c7749a - docs/models/documentout.md: - id: a69fd1f47711 - last_write_checksum: sha1:ed446078e7194a0e44e21ab1af958d6a83597edb - pristine_git_object: 28df11eb1aef1fdaf3c1103b5d61549fb32ea85d + last_write_checksum: sha1:76b9f47c399915a338abe929cb10c1b37282eadf + pristine_git_object: 95c3fa52ee3ff29e72bc0240a98c0afaa0cd5f62 docs/models/documenttextcontent.md: id: 29587399f346 last_write_checksum: sha1:93382da0228027a02501abbcf681f247814d3d68 pristine_git_object: 989f49e9bcb29f4127cb11df683c76993f14eba8 - docs/models/documentupdatein.md: - id: 185ab27259a7 - last_write_checksum: sha1:e0faccd04229204968dbc4e8131ee72f81288182 - pristine_git_object: 
0993886d56868aba6844824f0e0fdf1bdb9d74f6 + docs/models/documentunion.md: + id: c65f9e42375c + last_write_checksum: sha1:249043e03067f79b27dc6eac410fb937920e8cdb + pristine_git_object: e573bd4632493ca648ad61307c70148366625d4b docs/models/documentupload.md: id: 7ff809a25eb0 last_write_checksum: sha1:aea0f81009be09b153019abbc01b2918a1ecc1f9 pristine_git_object: 4e58a475f1776431c9c27a0fcdd00dd96257801f docs/models/documenturlchunk.md: id: 48437d297408 - last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 - pristine_git_object: 6c9a5b4d9e6769be242b27ef0208f6af704689c0 - docs/models/documenturlchunktype.md: - id: a3574c91f539 - last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c - pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef - docs/models/downloadfilerequest.md: - id: 5acd7aafd454 - last_write_checksum: sha1:5d7056818ddc5860e43699917496ded68b91ddfa - pristine_git_object: 3f4dc6ccc6d1c67396fe97197067c5421d8dc2d5 + last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d + pristine_git_object: 9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 docs/models/embeddingdtype.md: id: 22786e732e28 last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71 @@ -700,10 +696,10 @@ trackedFiles: id: da9a99ab48ab last_write_checksum: sha1:4971db390327db09f88feff5d2b8a0b1e6c5b933 pristine_git_object: d934b6774b25713afe923154d7709755426ec2cf - docs/models/eventout.md: - id: 9960732c3718 - last_write_checksum: sha1:dbc23814b2e54ded4aa014d63510b3a2a3259329 - pristine_git_object: d9202353be984d51b9c05fb0f490053ce6ccfe4a + docs/models/event.md: + id: 311c22a8574a + last_write_checksum: sha1:627793d6aed5e378e3f2eeb4087808eb50e948d5 + pristine_git_object: 3eebffca874b8614a5be3d75be3cb7b0e52c2339 docs/models/file.md: id: 4ad31355bd1c last_write_checksum: sha1:ade4d3c908c664a07a3c333cc24bc1bfb43ab88b @@ -716,14 +712,30 @@ trackedFiles: id: ed6216584490 last_write_checksum: sha1:02767595f85228f7bfcf359f8384b8263580d53a pristine_git_object: 14cab13ee191ae60e2c5e1e336d0a5abc13f778b + docs/models/filesapiroutesdeletefilerequest.md: + id: 7fdf9a97320b + last_write_checksum: sha1:411e38d0e08a499049796d1557f79d669fc65107 + pristine_git_object: 1b02c2dbb7b3ced86ddb49c2323d1d88732b480c + docs/models/filesapiroutesdownloadfilerequest.md: + id: b9c13bb26345 + last_write_checksum: sha1:1f41dad5ba9bd63881de04d24ef49a0650d30421 + pristine_git_object: 8b28cb0e5c60ac9676656624eb3c2c6fdc8a3e88 + docs/models/filesapiroutesgetsignedurlrequest.md: + id: 08f3772db370 + last_write_checksum: sha1:26aa0140444ccef7307ef6f236932032e4784e8f + pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1 + docs/models/filesapirouteslistfilesrequest.md: + id: 04bdf7c654bd + last_write_checksum: sha1:0a99755150c2ded8e5d59a96527021d29326b980 + pristine_git_object: 57d11722f1dba2640df97c22be2a91317c240608 + docs/models/filesapiroutesretrievefilerequest.md: + id: 2783bfd9c4b9 + last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab + pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b docs/models/fileschema.md: id: 9a05a660399d last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197 pristine_git_object: 4f3e72dba17a964155007755ad9d69f0304b2adb - docs/models/filesignedurl.md: - id: c0a57176d62e - last_write_checksum: sha1:2c64ef5abc75e617496f0a28d3e1cebfe269a6b9 - pristine_git_object: 52ce3f4f0c44df0ef3ed1918f92ad63f76ffc144 docs/models/fimcompletionrequest.md: id: b44677ecc293 last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 @@ -748,6 +760,10 @@ 
trackedFiles: id: e16926b57814 last_write_checksum: sha1:52006811b756ff5af865ed6f74838d0903f0ee52 pristine_git_object: 34b24bd4db1ad3f9e77e2c6a45a41d2fbc5cf7fd + docs/models/finetunedmodelcapabilities.md: + id: 3a6a0498ccf7 + last_write_checksum: sha1:82fc7d3f4e0b591b757f202699bb645bc61c69ff + pristine_git_object: d3203a2adccb7eb89c58395952c3e5a123a5b31b docs/models/format_.md: id: a17c22228eda last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 @@ -756,10 +772,6 @@ trackedFiles: id: b546cfde5aa6 last_write_checksum: sha1:752d9d238a90a3ef55205576fa38cee56ea1539e pristine_git_object: 919cdd384315c99d4b590bc562298403733344ce - docs/models/ftmodelcapabilitiesout.md: - id: f7be0dd1d889 - last_write_checksum: sha1:670412a0c0268f646dd444537bd79ce9440170c8 - pristine_git_object: 19690476c64ac7be53f974347c1618730f0013ce docs/models/ftmodelcard.md: id: 15ed6f94deea last_write_checksum: sha1:1c560ceaaacc1d109b2997c36de03192dfcda941 @@ -774,140 +786,72 @@ trackedFiles: pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 docs/models/functioncallentry.md: id: 016986b7d6b0 - last_write_checksum: sha1:bd3e67aea9eb4f70064e67e00385966d44f73f24 - pristine_git_object: fd3aa5c575019d08db258842262e8814e57dc6d5 + last_write_checksum: sha1:373eb3a2d72596fcbb8933b28426896d5ac6b6f4 + pristine_git_object: 2843db9d36d8b82a15ebfce0833c8b0832609b4a docs/models/functioncallentryarguments.md: id: c4c609e52680 last_write_checksum: sha1:ae88aa697e33d60f351a30052aa3d6e2a8a3e188 pristine_git_object: f1f6e39e724673556a57059a4dbda24f31a4d4b9 - docs/models/functioncallentryobject.md: - id: ea634770754e - last_write_checksum: sha1:d6bc885e9689397d4801b76c1a3c8751a75cf212 - pristine_git_object: 3cf2e427bfb6f2bc7acea1e0c6aafe965187f63f - docs/models/functioncallentrytype.md: - id: b99da15c307b - last_write_checksum: sha1:04665a6718ad5990b3beda7316d55120fbe471b0 - pristine_git_object: 7ea34c5206bdf205d74d8d49c87ddee5607582e9 + docs/models/functioncallentryconfirmationstatus.md: + id: 18f36160d744 + last_write_checksum: sha1:cc3ea4e03d26a1b22f94d42a87bd5ae63535d266 + pristine_git_object: 8948beb6d9ac647ada655960284dfc7f6d1f5ca1 docs/models/functioncallevent.md: id: cc9f2e603464 - last_write_checksum: sha1:942d1bed0778ba4738993fcdbefe080934b641d5 - pristine_git_object: f406206086afa37cbc59aa551ac17a4814dddf7e + last_write_checksum: sha1:58c6ee00af0c63614fd7506345977f9f2d8838ec + pristine_git_object: 0e3a36d6045a69e96c40836cdb586424225775af + docs/models/functioncalleventconfirmationstatus.md: + id: a33cc7957407 + last_write_checksum: sha1:36ac2d3442d83cbb1256e86f413134296bf8e90f + pristine_git_object: 4a3c8774d4eec4e1f5fea23a1827082e09f91669 docs/models/functionname.md: id: 4b3bd62c0f26 last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 docs/models/functionresultentry.md: id: 24d4cb18998c - last_write_checksum: sha1:528cae03e09e43bdf13e1a3fef64fd9ed334319b - pristine_git_object: 6df54d3d15e6d4a03e9af47335829f01a2226108 - docs/models/functionresultentryobject.md: - id: 025dc546525c - last_write_checksum: sha1:01a0085fb99253582383dd3b12a14d19c803c33c - pristine_git_object: fe52e0a5a848ea09dfb4913dd8d2e9f988f29de7 - docs/models/functionresultentrytype.md: - id: 69651967bdee - last_write_checksum: sha1:41489b0f727a00d86b313b8aefec85b4c30c7602 - pristine_git_object: 35c94d8e553e1cb641bef28fec2d8b3576d142f6 + last_write_checksum: sha1:1758992e30517b505b8d0622a54545dc9ae19163 + pristine_git_object: 6a77abfd27e3e46de950646d7f89777dca11300e 
docs/models/functiontool.md: id: 5fb499088cdf last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d pristine_git_object: 0226b7045c9d82186e1111bb2025e96a4de90bd6 - docs/models/getagentagentversion.md: - id: 825de6d2614f - last_write_checksum: sha1:d99f384ff5ee73e68fa7f8581d7622068b5b7498 - pristine_git_object: 6d7b3f1d15994c24a5b992d1908fe8126da0e3ea - docs/models/getagentrequest.md: - id: 743f3a4630be - last_write_checksum: sha1:4d17d6b7b15e39520414085fc977be881e4e0a85 - pristine_git_object: 3f729dff0f7fc773f83593222da0dd0618b3e8b3 - docs/models/getagentversionrequest.md: - id: 4bf5feb4494a - last_write_checksum: sha1:d26546c2fdd78e0f52e2a2c50736b412ce814f6e - pristine_git_object: c98fee9d141f556520e16189e90234063e6861eb - docs/models/getbatchjobrequest.md: - id: 0c3a5debd663 - last_write_checksum: sha1:c186bbc6b04e1ed2db32f68fb22cb7eff4c1a90c - pristine_git_object: f3c67eb4a898a21e8a78c3340171458dcbd21d58 - docs/models/getconversationhistoryrequest.md: - id: 27de0e44ed80 - last_write_checksum: sha1:d89318332c87b5fa3bba22a52e209bdd5702b3f0 - pristine_git_object: fc90282bd9308a7531c3c532234fd332a223f243 - docs/models/getconversationmessagesrequest.md: - id: 82bf9b5c275b - last_write_checksum: sha1:cdbb0371c7a35e84f7938d28719acd843ebc15ce - pristine_git_object: fd037fea6c09d97bfb74332838a2b2760de4dccb - docs/models/getconversationrequest.md: - id: ad6c903380f6 - last_write_checksum: sha1:ee93a91d5daa01fc937dd09589b268bb2e42868a - pristine_git_object: 8a66a8b032cb67503c0f6b95c98e0a40b13d16ec - docs/models/getdocumentextractedtextsignedurlrequest.md: - id: d47f32212cf5 - last_write_checksum: sha1:7d695630988d5ab3773aabfe17c3fa9177d7e9c9 - pristine_git_object: ff703802ddfe0e36768daf87f4c5626028642370 - docs/models/getdocumentrequest.md: - id: 4208f9b571b3 - last_write_checksum: sha1:45f6807e2f7cd4c30f95304172cb556896571b76 - pristine_git_object: 29f62127b09511f14a065b9b6f6068e63643ab7c - docs/models/getdocumentsignedurlrequest.md: - id: 734960a10101 - last_write_checksum: sha1:04debc445e51e7d0f922bfe7873d639a844c17b4 - pristine_git_object: 72a179c086e38650afd81165575c7926d9566f69 - docs/models/getdocumentstatusrequest.md: - id: d0a69468ea34 - last_write_checksum: sha1:a8d91948737e4fa392221ec18970d27af90c203e - pristine_git_object: 3557d7738be21206061ef5806b79118432b33f26 - docs/models/getdocumenttextcontentrequest.md: - id: 6baa6485417b - last_write_checksum: sha1:5b47d1d8d5675e4b9f477c8034ef64afc912cd06 - pristine_git_object: 8593340139f28b44dfed455849198f5d5a457643 - docs/models/getfilesignedurlrequest.md: - id: c7b1953174af - last_write_checksum: sha1:d558115d1611827f461cc6a9f373885271c7a51d - pristine_git_object: 0be3b2888b0680d5a5fac0057cedc279d112ddb8 - docs/models/getfinetuningjobrequest.md: - id: c18796fe85f3 - last_write_checksum: sha1:8166520e2d657098131fd77c81a86099ed4d3486 - pristine_git_object: f20cb2148330c7078c6e93f55aa99f1b09086eaf - docs/models/getfinetuningjobresponse.md: - id: 8f50d4a61ae1 - last_write_checksum: sha1:509e8d190b43b5a4a3e0ae7d97bf2b4262fcd1f8 - pristine_git_object: 1b0568dd8019879ec2e1d0ff039296f600415e21 - docs/models/getlibraryrequest.md: - id: 9c9a9e6c4f03 - last_write_checksum: sha1:822494a821ee3a51a477f305c140ed39cd6465fc - pristine_git_object: 2a3acf50a6300ea3bcbc3b8432fe28cbef82c620 + docs/models/getfileresponse.md: + id: a983b3c8acd6 + last_write_checksum: sha1:5ca732ae5b384937473c04de6736fbab34deca24 + pristine_git_object: 0edd13e0818fc70c9c4db1e08b1490c1e146ea63 + docs/models/getsignedurlresponse.md: + id: 5539e5d7c3d4 + 
last_write_checksum: sha1:7198474f48bfba6d47326cd436e4a00a8ba70ce3 + pristine_git_object: bde693236406fe092f48c315e3b68a2fbbe6f9a4 + docs/models/githubrepository.md: + id: 66c120df624b + last_write_checksum: sha1:045e538dd7faffc1c6c6e6816563c5c8e776a276 + pristine_git_object: 827b6f34ae68ace7b8b4811764f59de2e0fcdd22 docs/models/githubrepositoryin.md: id: b42209ef8423 last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23 pristine_git_object: 241cf584d5e2425e46e065f47a18bea50fa624db - docs/models/githubrepositoryout.md: - id: 0ca86e122722 - last_write_checksum: sha1:0e3999cef8a745ae24ac36907b3431bc5103ea6f - pristine_git_object: fe38393a0cc2eb5c0b0c4690cb0c4e5e3ec41df8 - docs/models/httpvalidationerror.md: - id: a211c095f2ac - last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e - pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/hyperparameters.md: id: c167bad5b302 - last_write_checksum: sha1:5b7f76360dea58be5350bbe074482da45e57599c - pristine_git_object: 46a6dd6baa1b1574bad5eadc1e83d4b72d56c0c8 + last_write_checksum: sha1:e391cf72690e6cd01a2878081b8d87938e1c6639 + pristine_git_object: b6c00c3647d21789c92ad7d32dd29c3089ca134f + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 docs/models/imagegenerationtool.md: id: d5deb6b06d28 - last_write_checksum: sha1:b3decee8fe7a824401f9afbd3544a69ccde4ef8e - pristine_git_object: 0c8de72cdd7149217010ae5d02777d1c5dd9896c + last_write_checksum: sha1:a1813ef99e4a0990fd073bb2311c475e88072029 + pristine_git_object: b476b6f2733a49767d7f7a4ad091fc321ab514f4 docs/models/imageurl.md: id: e75dd23cec1d - last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 - pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 docs/models/imageurlchunk.md: id: 4407097bfff3 - last_write_checksum: sha1:73e14a0beccfc9465ee6d2990462e609903f5cd5 - pristine_git_object: 43078c7849fb3e808c2eaeaa5a3caeab2619d700 - docs/models/imageurlchunktype.md: - id: b9af2db9ff60 - last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 - pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + last_write_checksum: sha1:da7a792f7b649f311062338dfbf3d25ff55fe6c5 + pristine_git_object: db0c53d22e29fa25222edb86b264e5135879a029 docs/models/imageurlunion.md: id: 9d3c691a9db0 last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 @@ -918,124 +862,184 @@ trackedFiles: pristine_git_object: b44a467d258cfa8cc3d2a3236330471dbc3af109 docs/models/inputs.md: id: 4b0a7fb87af8 - last_write_checksum: sha1:19d8da9624030a47a3285276c5893a0fc7609435 - pristine_git_object: 0f62a7ce8e965d0879507e98f808b9eb254282a6 - docs/models/inputsmessage.md: - id: 174dcada287d - last_write_checksum: sha1:92a95c1757e33603d1aa9ed6c9912d1c551d9974 - pristine_git_object: e3543fb4f9fff679b25f7f803eb2e8dabd56368f + last_write_checksum: sha1:c5f0c21c25fd5a698398a9e4ddf6261add60740c + pristine_git_object: d5771207d9725f04ca2ab1be692fc089360a58f4 docs/models/instructrequest.md: id: a0034d7349a2 last_write_checksum: sha1:34a81411110cbb7a099c45e482f5d1702ae48fd3 pristine_git_object: 5f0cdfff135fb72d3b1a81999a30b720c044e3d4 - docs/models/instructrequestinputs.md: - id: 2a677880e32a - last_write_checksum: sha1:64bcc6371d70446da60f167682504568d7f2618c - pristine_git_object: 
931ae5e47df2d2850e3ef6740e2b89e1e0138297 docs/models/instructrequestmessage.md: id: 380503708a09 last_write_checksum: sha1:551b5d6dd3ba0b39cad32478213a9eb7549f0023 pristine_git_object: 57ed27ab3b1430514797dd0073bc87b31e5e3815 - docs/models/jobin.md: - id: 1b7b37214fa8 - last_write_checksum: sha1:0a241378cf3791c5c3fa733f30d45c07ef841448 - pristine_git_object: 62da90727898dd84f547c436c17fefa788e4f0d6 - docs/models/jobinintegration.md: - id: 200c505fa67f - last_write_checksum: sha1:c9887897357e01e6e228b48d6bf0c3fb4edd29f7 - pristine_git_object: 103820e7ec55769227610c385addbecfcd075cae - docs/models/jobinrepository.md: - id: 9ab1d5469c10 - last_write_checksum: sha1:1773f59546b94688d0be16d3f5f014cd86f5b1d7 - pristine_git_object: e873ae63f359d6ac4aca03b058a7c25fbbf2ba32 - docs/models/jobmetadataout.md: - id: 30eb634fe247 - last_write_checksum: sha1:46d54b6f6004a6e571afd5207db5170dfbce7081 - pristine_git_object: 6218a161b71abbb35eb4ca6e3ce664226983efc2 - docs/models/jobsout.md: - id: cbe31f43047d - last_write_checksum: sha1:4bd9ffbd2e5a286090167c795b9c3970e3c7d0a5 - pristine_git_object: 69f8342ac6f02a6e60d05b6f5b3cd892964fd3d7 - docs/models/jobsoutdata.md: - id: 809574cac86a - last_write_checksum: sha1:06455044d314c4edbd1ce4833d551c10918f0a3e - pristine_git_object: 28cec31117416b79eb8688d84b47b157974574cc + docs/models/jobmetadata.md: + id: 1f8e4c2f49e5 + last_write_checksum: sha1:a29ec10cd129b955672f60aaf526905780afe6b6 + pristine_git_object: 5d8a89ddc6b401a80e23d51cb378cdac5d4eb342 + docs/models/jobsapiroutesbatchcancelbatchjobrequest.md: + id: 798cb1ca1385 + last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68 + pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62 + docs/models/jobsapiroutesbatchgetbatchjobrequest.md: + id: e83a7ec84f8a + last_write_checksum: sha1:d661875832b4b9d5f545262216c9fcb9a77c8cd0 + pristine_git_object: 8c259bea9bef11f779fd609f1212565d574457e2 + docs/models/jobsapiroutesbatchgetbatchjobsrequest.md: + id: 5b9c44ad4d31 + last_write_checksum: sha1:1d7c05337b7cfe68f85a36576d060e1a890f9f96 + pristine_git_object: 5ceb0b2c40f079ffbe2cc4c82f6c3f94276980b4 + docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md: + id: 8eb8c127091e + last_write_checksum: sha1:2b93a6bed5743461bb03c8337fb25dfc5a15522e + pristine_git_object: f9700df50b8f512c4139c1830aba18989d022b8e + docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md: + id: deff83b39b78 + last_write_checksum: sha1:dac8d8f2e95aed2db9b46711e6e80816881d5d14 + pristine_git_object: 883cbac685563d2e0959b63638f6b967ebdf1ee9 + docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md: + id: c45757ba1ed9 + last_write_checksum: sha1:4931469b58d454264f1e8d32df6a07d3f6f01022 + pristine_git_object: fb62eb62027c8151d597544fcaf27b972aeb78b3 + docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md: + id: 8aa8030f26d7 + last_write_checksum: sha1:4aada0d2297479d8276f5a422cb4dd6b56b1e176 + pristine_git_object: 7b52e2ca6365f17ac3b19d128937783d87c7fa37 + docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md: + id: a9b75762e534 + last_write_checksum: sha1:8f1395447928e089c88dce8c0ced1030ec5f0eba + pristine_git_object: fde19800303a901149bf39c5330ef8c4da87df62 + docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md: + id: c0b31f4fc621 + last_write_checksum: sha1:4ceb9df28082bf5d496cd222a0f45dc81a576367 + pristine_git_object: f770532776a13860e697da7478d1677d16f0ec36 + docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md: + id: 52078f097503 + last_write_checksum: 
sha1:fc134fdc7e229b8df373b77096c8299c214171a7 + pristine_git_object: 23c52c342358ea889b25ee7b18b381b68519c6cf + docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md: + id: 8545ffb587d6 + last_write_checksum: sha1:bbc08ca53c2da180b96ed0347cf4954410c79311 + pristine_git_object: 40d57686aec11d9bdc4c116ea4c98183e0a6414c + docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md: + id: b4e2b814d8c3 + last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e + pristine_git_object: 4429fe480ab9486de98940a119ac63f40045313b + docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md: + id: cfd848845787 + last_write_checksum: sha1:a165279fa0c9e051458ea4333dfdd31ef0440426 + pristine_git_object: 1a7e71d4479369f13c391a9782278557bc4531ae + docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md: + id: 75b5dd1bcbaa + last_write_checksum: sha1:dd30e7ff8748d26497458f3398c0547113dc058f + pristine_git_object: 95c1734daa7164bedeeb1fa58dd792939f25bc17 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md: + id: 60bd2e28993a + last_write_checksum: sha1:58835c28cccaf90e99bbb72bf7c5a5ce42498824 + pristine_git_object: dbe49a86ca2bf64901133fd58a342d30909c35b2 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md: + id: c265a30fd4cf + last_write_checksum: sha1:410c62a884aae902cdfbfcab33779e62487de13b + pristine_git_object: f40350bf9d74d09ca3a2ec6d91d9068bda631ef5 docs/models/jsonschema.md: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 - docs/models/legacyjobmetadataout.md: - id: b3b8c262f61a - last_write_checksum: sha1:d8c4e7525e2dc2f4d29bfeb6cadc648fab1c62c7 - pristine_git_object: 8a712140fbf3c36f4bd9686e135b70d8688aa9c1 - docs/models/libraryin.md: - id: a08170e6397c - last_write_checksum: sha1:2c996ecf1ae5d9e8df702a79741b72b3571eb6ef - pristine_git_object: d6b119148725627bcf76594c4a24e915399cd8f8 - docs/models/libraryinupdate.md: - id: 6d06b6b21498 - last_write_checksum: sha1:4ec01d7f7e24f58a74613d4847725bfd516b7d7f - pristine_git_object: 4aa169c7669c00fcedc423fbff6f386697360787 - docs/models/libraryout.md: - id: 2e8b6d91ded2 - last_write_checksum: sha1:d71053b44725147265871be445217e3e1a0e5ede - pristine_git_object: ebf46d57de6bad7022a3e8cb8eaf88728bbbe888 - docs/models/listagentaliasesrequest.md: - id: 495659b2d40a - last_write_checksum: sha1:637e7e0e8deadcf2e77cc9469727010f90f0ad79 - pristine_git_object: b3570cb80d484dadaf2a138c70bbb477746ba416 - docs/models/listagentsrequest.md: - id: aeb9bbc163f5 - last_write_checksum: sha1:86c5f5068061b79d2e582e4dd9a8b0ed4c84cbcf - pristine_git_object: 79aec3ea6e3506797fc96a7ca9d7393543270866 - docs/models/listagentversionsrequest.md: - id: 3270f6dd4107 - last_write_checksum: sha1:14ffb20c5c48cca371ed27f6a6a8b565cd4a5565 - pristine_git_object: ba8ddaa5cb4c94623b29a1f635f38a04cc0ff497 - docs/models/listbatchjobsrequest.md: - id: e2a0b1528191 - last_write_checksum: sha1:01a587ec7cc6e183d47e106eb809e7c1e9e79e39 - pristine_git_object: 19981b2425254058bd24b218d1f7881fc3635c89 - docs/models/listconversationsrequest.md: - id: 6c0961051703 - last_write_checksum: sha1:453eb480cd48330f857b4c80210b6753a750348d - pristine_git_object: d99b420834b17f3f5b7fac630af7a7b0d2db341d - docs/models/listconversationsresponse.md: - id: 65075f5cf00c - last_write_checksum: sha1:8478c55b156c09f2b714d2854030a04494b48f7c - pristine_git_object: 9d611c553b245657181c06d7f65acaa9d8128556 - docs/models/listdocumentout.md: - id: 
4bec19e96c34 - last_write_checksum: sha1:c0b3a6e3841f120c52b1d7718d7226a52fe1b6d6 - pristine_git_object: f14157b8db55c1201d9f7151742e9ddf0d191c16 - docs/models/listdocumentsrequest.md: - id: 36c8a1116534 - last_write_checksum: sha1:390849ce3d93a64c505b7b2f7cae411766a5e44b - pristine_git_object: 369e8edbe471dd5167ad1baf74ee5b00eb7d5043 - docs/models/listfilesout.md: - id: 98d4c59cc07e - last_write_checksum: sha1:e76df31628984095f1123005009ddc4b59b1c2bc - pristine_git_object: bcb1f13aa17f41dadb6af37541e929364e2d6cec - docs/models/listfilesrequest.md: - id: 70edaf3759f0 - last_write_checksum: sha1:686edbd5134dfe60cfd98221ec78d296a8429d28 - pristine_git_object: 2d76a76b011603e3a7c4b4932ef4b26def1cb792 - docs/models/listfinetuningjobsrequest.md: - id: 41878563fe80 - last_write_checksum: sha1:103cd0d3c5334ea60a6c6e1c2585bf9bd493c78f - pristine_git_object: 3a04fc709c2a12cc4f414701efcaec4584b7d6df - docs/models/listfinetuningjobsstatus.md: - id: 1d6d54dc70ea - last_write_checksum: sha1:c4f69e2b2b5aac719281d264722f2cba6aa048a0 - pristine_git_object: 07db9ae5d87b7192ada4843d4fe0d3e8573794c6 - docs/models/listlibraryaccessesrequest.md: - id: 0b387463f914 - last_write_checksum: sha1:2912e1fc3ee179f01fde7a21501e2501debecc2c - pristine_git_object: d98bcda22bbb2540a525f2ce1516a637446b0a0f - docs/models/listlibraryout.md: - id: ea34f8548bd6 - last_write_checksum: sha1:cec920357bc48bea286c05d16c480a9a9369b459 - pristine_git_object: db76ffa10eb97f143ad4a6930e520e389fe18153 + docs/models/legacyjobmetadata.md: + id: 50ac14d9b270 + last_write_checksum: sha1:ebe37a176ca318e797fee7ebf4eef73fb9938a12 + pristine_git_object: 4705ab4f67e10b8e2cbfc86b29c03a9945aeb8fb + docs/models/librariesdeletev1request.md: + id: c0c3b2e1aabc + last_write_checksum: sha1:bef84f8851b06d2d914b605f11109de1850d0294 + pristine_git_object: 68d7e54369ce75422bf8b0ff16cada1c0ae2b05c + docs/models/librariesdocumentsdeletev1request.md: + id: 9d557bd7d1cc + last_write_checksum: sha1:1b580b657559356886915ee5579b90a03db19337 + pristine_git_object: efccdb1bbc36cf644ed2d1716cbd202e6d6bf6c5 + docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md: + id: 27ad38ce4cb1 + last_write_checksum: sha1:b35ad610330232b395b5f87cc15f6ae270de6816 + pristine_git_object: 14ca66f72693f1df05eb93e0cca45f440b62d282 + docs/models/librariesdocumentsgetsignedurlv1request.md: + id: 4498715b6cfb + last_write_checksum: sha1:31f78079e31e070d080c99555cd2d85318fc4610 + pristine_git_object: 7c08c180d59a8e8475fea89424b8b2021d51385f + docs/models/librariesdocumentsgetstatusv1request.md: + id: c2219d3a3738 + last_write_checksum: sha1:44e79df94cf2686e83d7a2e793140a6a7b3a1c05 + pristine_git_object: e6d41875966348fd9e770d06c8099e48f0e64b5d + docs/models/librariesdocumentsgettextcontentv1request.md: + id: 850dfa465952 + last_write_checksum: sha1:4a1212e111525f4265d2924ce52f9c13d2787d4d + pristine_git_object: 2f58a4460ccdad531391318c62191e76c1ec22ac + docs/models/librariesdocumentsgetv1request.md: + id: cdd0df2f7e9d + last_write_checksum: sha1:36e5ef39552159044ecd28d20ee0792ea5bcadef + pristine_git_object: 6febc058425bb38857c391ee4c40d600858e6058 + docs/models/librariesdocumentslistv1request.md: + id: 7b5756e50d64 + last_write_checksum: sha1:2605b7972a3d7b4f73ab8052be4bf740f44f6f6f + pristine_git_object: 44f6300115853053214639982516a60b3268e778 + docs/models/librariesdocumentsreprocessv1request.md: + id: 1b8bf57b3f0a + last_write_checksum: sha1:8528785c1b4ae18d6ec6f261d29d5daac0d420a3 + pristine_git_object: 196ba17b749ce9efc1c30189864e474896814f85 + 
docs/models/librariesdocumentsupdatev1request.md: + id: b9147b1c0e38 + last_write_checksum: sha1:ed3ae7761990bd26a4bf99cd4641822eb90d3d57 + pristine_git_object: d46308509330099e30a53dddad51da8a6186aa92 + docs/models/librariesdocumentsuploadv1request.md: + id: 89a89d889c72 + last_write_checksum: sha1:32294a87d8a0b173b4d6f12b607a1bb3da765776 + pristine_git_object: 172a6183f31eec3142a84637414484799c2a4677 + docs/models/librariesgetv1request.md: + id: f47ad71ec7ca + last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac + pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816 + docs/models/librariessharecreatev1request.md: + id: 99e7bb8f7fed + last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088 + pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669 + docs/models/librariessharedeletev1request.md: + id: bc8adba83f39 + last_write_checksum: sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf + pristine_git_object: 850e22ab79863ba544f453138322c0eb5bf544cd + docs/models/librariessharelistv1request.md: + id: 86e6f08565e2 + last_write_checksum: sha1:6f2ffff66fa5fb141d930bca7bb56e978d62b4a5 + pristine_git_object: 98bf6d17ab013c1dd3f0ab18c37bbfc1a63f1b76 + docs/models/librariesupdatev1request.md: + id: f7e51b528406 + last_write_checksum: sha1:6a33b0161702ecc335dd2859df1bbc05b73702a9 + pristine_git_object: c5c142db7aaa49990135c21eabde43b8c0fdf756 + docs/models/library.md: + id: e8ec114dd107 + last_write_checksum: sha1:a4d6e9a777ce3d63aac24432281933ce6e13b4a9 + pristine_git_object: 4319f43df922b4924a11d494002826cb8d6dea0b + docs/models/listbatchjobsresponse.md: + id: e03025d58630 + last_write_checksum: sha1:de42c9396546fc8487d0bd6ed15b4076599fa83f + pristine_git_object: c23e32201d12a2594f97a493f63b2b7b42b9e337 + docs/models/listdocumentsresponse.md: + id: f2091cee0405 + last_write_checksum: sha1:335d0ccd3a448e65739d5a0cfa2c67614daec031 + pristine_git_object: 47b9d3b73fdc85bf6e463c91790faf346df56664 + docs/models/listfilesresponse.md: + id: b15df90d2d59 + last_write_checksum: sha1:4840f26427acf8846a9f1e48136f0663c6e4cd87 + pristine_git_object: 802f685fb3a76afb86a69cf41e6de9339cd6fbc7 + docs/models/listfinetuningjobsresponse.md: + id: d04e4dfddf78 + last_write_checksum: sha1:cebaf361aa10f1f6c4299c3c8a34f32d301455ad + pristine_git_object: 00251242023e2161747ebf00b4c2959909e2b654 + docs/models/listfinetuningjobsresponsedata.md: + id: 59c80de4086d + last_write_checksum: sha1:5a0d91c251b4b9283895d9f19f7b9416f93d4468 + pristine_git_object: adb0644475841c6a4686e8c42790dd44eed43dc1 + docs/models/listlibrariesresponse.md: + id: 87e3bec10745 + last_write_checksum: sha1:00522e685ec71a54f5f272d66b82e650848eaf36 + pristine_git_object: e21b9ced628f6fd5ae891d4a46666ebc94546859 docs/models/listsharingout.md: id: a3249129f37e last_write_checksum: sha1:4831e4f02e1d5e86f138c7bb6b04d095aa4df30f @@ -1050,68 +1054,40 @@ trackedFiles: pristine_git_object: 76256fb913376a15d5bcd2531b18f1a78b980c9d docs/models/messageinputcontentchunks.md: id: 34aac9c271db - last_write_checksum: sha1:641cd1dba3721f85b049c5ee514879f067483949 - pristine_git_object: 4fd18a0dcb4f6af4a9c3956116f8958dc2fa78d1 + last_write_checksum: sha1:d8ffdfd8b5458497e2cb6a32f52900c3ca2a6ddf + pristine_git_object: 0561785082c741f39f930ab7ded5b6c6a9ade6ad docs/models/messageinputentry.md: id: eb74af2b9341 - last_write_checksum: sha1:07124339ecb87e31df5f0e2f887e23209dd269af - pristine_git_object: 52183a32330b3e0bf91a1bd5e541dfda12d3f1a0 + last_write_checksum: sha1:c91bfdf9426c51236b6ff33d127dbe62b051a9da + pristine_git_object: 
f8514fb3305dbe1df91db8d622cc33a753b63623 docs/models/messageinputentrycontent.md: id: 7e12c6be6913 last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e - docs/models/messageinputentryobject.md: - id: 9a1d0d31f357 - last_write_checksum: sha1:7746753005fda37834a73e62bf459eacb740ba5b - pristine_git_object: 6bdd62e27d7353dbb7d521ad02bde358496ab108 - docs/models/messageinputentryrole.md: - id: 2497d07a793d - last_write_checksum: sha1:a41eb58f853f25489d8c00f7a9595f443dcca2e6 - pristine_git_object: f2fdc71d8bc818b18209cd1834d4fead4dfd3ba6 - docs/models/messageinputentrytype.md: - id: 5d2a466dad0f - last_write_checksum: sha1:19f689ffdd647f3ddc747daf6cb0b4e811dfdcee - pristine_git_object: d3378124db83c92174e28fe36907263e2cbe6938 docs/models/messageoutputcontentchunks.md: id: 802048198dc0 - last_write_checksum: sha1:d70a638af21ee46126aa0434bf2d66c8dd8e43ff - pristine_git_object: d9c3d50e295b50618f106ef5f6b40929a28164df + last_write_checksum: sha1:8cf4e4ea6b6988e22c117d8f689bbfb0869816ad + pristine_git_object: c4a7777e7675ebf2384311ec82b2713da69e5900 docs/models/messageoutputentry.md: id: f969119c8134 - last_write_checksum: sha1:cf5032929394584a31b3f12f55dfce6f665f71c7 - pristine_git_object: 5b42e20d1b03263f3d4d9f5cefe6c8d49c984e01 + last_write_checksum: sha1:f50b955cd622a6160c0ada34b0e14bff612802b7 + pristine_git_object: 73a1c666acc913b96d65a124612c4a728882bbc9 docs/models/messageoutputentrycontent.md: id: 44019e6e5698 last_write_checksum: sha1:d0cc7a8ebe649614c8763aaadbf03624bb9e47e3 pristine_git_object: 5206e4eb0d95e10b46c91f9f26ae00407d2dd337 - docs/models/messageoutputentryobject.md: - id: b3a7567581df - last_write_checksum: sha1:46528a6f87408c6113d689f2243eddf84bcbc55f - pristine_git_object: bb254c82737007516398287ff7878406866dceeb - docs/models/messageoutputentryrole.md: - id: bf7aafcdddab - last_write_checksum: sha1:e28643b6183866b2759401f7ebf849d4848abb10 - pristine_git_object: 783ee0aae4625f7b6e2ca701ac8fcdddcfe0e412 - docs/models/messageoutputentrytype.md: - id: 960cecf5fde3 - last_write_checksum: sha1:b6e52e971b6eb69582162a7d96979cacff6f5a9c - pristine_git_object: cb4a7a1b15d44a465dbfbd7fe319b8dbc0b62406 docs/models/messageoutputevent.md: id: b690693fa806 - last_write_checksum: sha1:d6538a4b5d5721c09bc196f3e9523ed45dafbea7 - pristine_git_object: b0fa1a2d369c89ec75f43c6b31ff52b0d80d9b1c + last_write_checksum: sha1:a4157c087ff95fa9445757c9d363615718156164 + pristine_git_object: e09a965f7d4cc35d6b120ba5555d96ba7b3e8a27 docs/models/messageoutputeventcontent.md: id: cecea075d823 last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914 pristine_git_object: 16d8d52f6ff9f43798a94e96c5219314731ab5fb - docs/models/messageoutputeventrole.md: - id: 87d07815e9be - last_write_checksum: sha1:a6db79edc1bf2d7d0f4762653c8d7860cb86e300 - pristine_git_object: e38c6472e577e0f1686e22dc61d589fdb2928434 - docs/models/metricout.md: - id: 7c6ff0ad95f9 - last_write_checksum: sha1:eef34dc522a351e23d7371c00a07662a0711ea73 - pristine_git_object: 3c552bac2fa3a5a3783db994d47d255a94643110 + docs/models/metric.md: + id: a812a3e37338 + last_write_checksum: sha1:14016848dcfaba90014b482634ed6d5715caa860 + pristine_git_object: 7f86303651650177ece51b82d867cab858e830ae docs/models/mistralpromptmode.md: id: d17d5db4d3b6 last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 @@ -1122,12 +1098,8 @@ trackedFiles: pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca docs/models/modelconversation.md: id: 497521ee9bd6 - 
last_write_checksum: sha1:440c9e7c306f20bd4f4b27ab0cf770d3bf8762e2 - pristine_git_object: 813e1f3a79ad14eae55bbb1b96598d6260904d9d - docs/models/modelconversationobject.md: - id: 4c5699d157a9 - last_write_checksum: sha1:8e2e82e1fa4cb97f8c7a8a129b3cc9cd651e4055 - pristine_git_object: ead1fa26f5d9641a198a14b43a0f5689456e5821 + last_write_checksum: sha1:22a8d7502eeaf176fbd1c7b22b512b4f9e4e043f + pristine_git_object: af2e5c6149339a561b03b954cd0e71f9d9aeffd6 docs/models/modelconversationtool.md: id: 2dd28167bc36 last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f @@ -1166,8 +1138,8 @@ trackedFiles: pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e docs/models/ocrrequest.md: id: 6862a3fc2d0f - last_write_checksum: sha1:9311e2c87f8f4512c35a717d3b063f2861f878d4 - pristine_git_object: 87929e53f8a74823b82ecce56d15f22228134fa6 + last_write_checksum: sha1:2faa819df648d330074c177d8f5d4a9c9a27bc90 + pristine_git_object: dd3fc2ea28cc2bc147473ba9f73aa32a9528632a docs/models/ocrresponse.md: id: 30042328fb78 last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 @@ -1190,8 +1162,8 @@ trackedFiles: pristine_git_object: d0ee0db93f56c40f6684fcfdb5873aba586bc876 docs/models/outputcontentchunks.md: id: f7e175c8e002 - last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24 - pristine_git_object: c76bc31d4d8791b7bef4dc6cbff6671b38a7927d + last_write_checksum: sha1:5adb0733a8ca9b224155dfef66dfb37b7f416972 + pristine_git_object: e5185014faa41b6e6d1567d713fc390f551fad01 docs/models/paginationinfo.md: id: 3d2b61cbbf88 last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0 @@ -1216,10 +1188,22 @@ trackedFiles: id: d25137243bef last_write_checksum: sha1:f8c3a4984d647d64e8ea4e1e42654265ffe46b0f pristine_git_object: da3764ef56337bdc773eaf8e9aa747cbd1b407e2 + docs/models/realtimetranscriptioninputaudioappend.md: + id: fa2aa317d1ca + last_write_checksum: sha1:59cce0828505fdb55104cd3144b75334e0f31050 + pristine_git_object: 5ee365eb9a993933509ac4666bcec24bfcc6fccd + docs/models/realtimetranscriptioninputaudioend.md: + id: 11045f9cc039 + last_write_checksum: sha1:945ca0475826294e13aba409f3ae2c2fc49b1b67 + pristine_git_object: 393d208c6e242959161f4436d53cf4aa2df69a92 + docs/models/realtimetranscriptioninputaudioflush.md: + id: c2f2258e0746 + last_write_checksum: sha1:a4e6d160da44c6f57b01059f7198208702e9b06a + pristine_git_object: 367725baa278935a6a282338ca7f2a23895a86d8 docs/models/realtimetranscriptionsession.md: id: aeb0a0f87d6f - last_write_checksum: sha1:c3aa4050d9cc1b73df8496760f1c723d16183f3a - pristine_git_object: 94a0a89e8ca03866f8b09202a28c4e0f7c3af2e6 + last_write_checksum: sha1:d72bf67442ac5e99f194c429e96a504685f02efb + pristine_git_object: 750bd7f79b65666812c6207d7085b9437c49517d docs/models/realtimetranscriptionsessioncreated.md: id: aa2ae26192d6 last_write_checksum: sha1:d13fec916d05300c86b52e951e81b1ceee230634 @@ -1228,26 +1212,26 @@ trackedFiles: id: 56ce3ae7e208 last_write_checksum: sha1:833db566b2c8a6839b43cb4e760f2af53a2d7f57 pristine_git_object: 7e2719957aae390ee18b699e61fbc7581242942f + docs/models/realtimetranscriptionsessionupdatemessage.md: + id: 02a5eee40cdd + last_write_checksum: sha1:44f8e6bc8f8cd4087a7e86c85db5141fab90f78d + pristine_git_object: 2a50ca92720bad6605bdeafd83b43d0e8bf40615 + docs/models/realtimetranscriptionsessionupdatepayload.md: + id: 3ddd5a95510a + last_write_checksum: sha1:33bca4d547ca812d55ac49bf7b17851b2fecfc80 + pristine_git_object: d6c6547d7895e53be15a0cce46b6524178acc3bc docs/models/referencechunk.md: id: 
07895f9debfd - last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 - pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc - docs/models/referencechunktype.md: - id: 0944b80ea9c8 - last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 - pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 - docs/models/reprocessdocumentrequest.md: - id: 3c713aad474b - last_write_checksum: sha1:100b194196051470a2ae75cc2f707afec0c8d161 - pristine_git_object: cf3982a8cd76e4b2c8429acede0a12a044cbe2ca + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab docs/models/requestsource.md: id: 8857ab6025c4 last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b docs/models/response.md: id: 583c991c7a30 - last_write_checksum: sha1:f4a3ec06ff53cd1cbdf892ff7152d39fa1746821 - pristine_git_object: 3512b7a8f9fdfcaaed9a6db06ef4266629d9fa89 + last_write_checksum: sha1:0791cb4aa4045708ab64d42bf67bd6ab74bc7752 + pristine_git_object: ff67925758959b87992b47a1a32c224eeeb599e3 docs/models/responsedoneevent.md: id: 38c38c3c065b last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f @@ -1276,26 +1260,14 @@ trackedFiles: id: 48d4a45780a9 last_write_checksum: sha1:8e75db359f0d640a27498d20c2ea6d561c318d7e pristine_git_object: 844c5d610a9a351532d12b1a73f6c660059da76b - docs/models/restartconversationrequest.md: - id: b85b069aa827 - last_write_checksum: sha1:b7fb56a5561ab329f605d77795a610da8faaf561 - pristine_git_object: f24f14e67e749da884363038ca72891449cd99da - docs/models/restartconversationstreamrequest.md: - id: 65df276279f0 - last_write_checksum: sha1:907807c7e5969f82e70e743fddeb4c6f4278fc1a - pristine_git_object: daa661a9250701ad33241084d5033f73d75a9d6e - docs/models/retrievefileout.md: - id: 8e82ae08d9b5 - last_write_checksum: sha1:600d5ea4f75dab07fb1139112962affcf633a6c9 - pristine_git_object: 28f97dd25718833aaa42c361337e5e60488bcdc8 - docs/models/retrievefilerequest.md: - id: eac92ea7ca45 - last_write_checksum: sha1:c80772e3cfbe704385abe1b347d8e69d55bd9e00 - pristine_git_object: 454b9665b8134876488eb32c57a9dc45f4d972de - docs/models/retrievemodelrequest.md: - id: 392008b3324b - last_write_checksum: sha1:b9aafe10f0cd838a0b6959ec8dde5850ce59c55d - pristine_git_object: 787c3dd1000cba873c787fd5b9dcbe3c793f2b11 + docs/models/retrievemodelv1modelsmodelidgetrequest.md: + id: ac567924689c + last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9 + pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:c7ef39a81299f3156b701420ef634a8b4fab76f0 + pristine_git_object: 853c6257d9bdb4eda9cb37e677d35ab477dca812 docs/models/sampletype.md: id: 0e09775cd9d3 last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a @@ -1328,18 +1300,10 @@ trackedFiles: id: 6a902241137c last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3 - docs/models/startfinetuningjobrequest.md: - id: 48fd313ae362 - last_write_checksum: sha1:f645c1e3e3244729eaa31aabb4b3ec0454fb114f - pristine_git_object: 9df5aee8f527fea4f0c9b02a28af77a65765be48 - docs/models/startfinetuningjobresponse.md: - id: 970045c710ff - last_write_checksum: sha1:78d230946abe19e928f286562ac589c7672c9854 - pristine_git_object: dce84c5a7711cd655a624b6ba0540504a6ff75d7 docs/models/systemmessage.md: id: fdb7963e1cdf - 
last_write_checksum: sha1:561c3372391e093c890f477b3213c308ead50b81 - pristine_git_object: dfb0cd0bd17aecbc1fe4b8410e78440f65038fef + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 docs/models/systemmessagecontent.md: id: 94a56febaeda last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb @@ -1354,24 +1318,16 @@ trackedFiles: pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a docs/models/textchunk.md: id: 6cd12e0ef110 - last_write_checksum: sha1:f04818ca76e68b3d3684927e4032d5d7de882f6a - pristine_git_object: d488cb51abeb4913c8441d9fbe9e5b964099bb7e - docs/models/textchunktype.md: - id: 886e88ebde41 - last_write_checksum: sha1:ba8db2a3910d1c8af424930c01ecc44889335bd3 - pristine_git_object: e2a2ae8bcdf8a35ad580a7de6271a5d26cd19504 + last_write_checksum: sha1:d9fe94c670c5e0578212752c11a0c405a9da8518 + pristine_git_object: df0e61c32bc93ef17dbba50d026edace139fee6a docs/models/thinkchunk.md: id: bca24d7153f6 - last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b - pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab - docs/models/thinkchunktype.md: - id: 0fbeed985341 - last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 - pristine_git_object: baf6f755252d027295be082b53ecf80555039414 - docs/models/thinking.md: - id: 07234f8dd364 - last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 - pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + last_write_checksum: sha1:0f861f1653035dea2018be9a977c15f54add9531 + pristine_git_object: 70c0369f16465e1b1f5f46e8cd799e5db536cdde + docs/models/thinkchunkthinking.md: + id: 22de7b5060fb + last_write_checksum: sha1:5e0722b8d513b38d60fbfe28635bdea40b951593 + pristine_git_object: dd1ecca12b5cda76a51b1e13335f1757a9dd7a68 docs/models/timestampgranularity.md: id: eb4d5a8e6f08 last_write_checksum: sha1:e256a5e8c6010d500841295b89d88d0eface3b88 @@ -1384,6 +1340,10 @@ trackedFiles: id: 80892ea1a051 last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolcallconfirmation.md: + id: 944eebb142ff + last_write_checksum: sha1:864ccb39a00094d965b764235e74709945abca3d + pristine_git_object: 1812f7d687d83f5692d9e79709e56813ab2c79b1 docs/models/toolchoice.md: id: "097076343426" last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 @@ -1392,6 +1352,10 @@ trackedFiles: id: 15410de51ffc last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolconfiguration.md: + id: 06bfa2c4e662 + last_write_checksum: sha1:9b619977375f228c76f09d48d6e2833add6c07e2 + pristine_git_object: 89286a172124ce3473bcb081de6e4db8c95afefa docs/models/toolexecutiondeltaevent.md: id: f2fc876ef7c6 last_write_checksum: sha1:ae1462a9b5cb56002b41f477ce262cb64ccf2f4e @@ -1410,60 +1374,44 @@ trackedFiles: pristine_git_object: 6449079d7b467796355e3353f4245046cced17e8 docs/models/toolexecutionentry.md: id: 75a7560ab96e - last_write_checksum: sha1:fdaa9abd5417486100ffc7059fcfdc8532935ed3 - pristine_git_object: adf88fb1acec13bf8016eb42d6bdc5fd3bd279b5 + last_write_checksum: sha1:668d8fbc59bc729bf4b1d95d2f2bfe4097701c0e + pristine_git_object: 03316381b130cf02751b10fef4129c8f23072b76 docs/models/toolexecutionentryname.md: id: 86d537762559 last_write_checksum: sha1:6c528cdfbb3f2f7dc41d11f57c86676f689b8845 pristine_git_object: fb762a5382d8b0e93dc2eb277f18adf810057c55 - 
docs/models/toolexecutionentryobject.md: - id: af106f91001f - last_write_checksum: sha1:6df075bee4e84edf9b57fcf62f27b22a4e7700f4 - pristine_git_object: 0ca79af56d60094099c8830f638a748a92a40f21 - docs/models/toolexecutionentrytype.md: - id: b61e79a59610 - last_write_checksum: sha1:b0485bae901e14117f76b8e16fe80023a0913787 - pristine_git_object: a67629b8bdefe59d188969a2b78fa409ffeedb2a docs/models/toolexecutionstartedevent.md: id: 37657383654d - last_write_checksum: sha1:47126a25c2a93583038ff877b85fc9ae1dcef9f3 - pristine_git_object: c41c7258779f15f1f0436ad890f4947d780bfa75 + last_write_checksum: sha1:5a020d24bdeb4eb9976ce93a8daa91947026bde9 + pristine_git_object: 189b8a3d3b22d73000850a3f1a95b85e358c2090 docs/models/toolexecutionstartedeventname.md: id: be6b33417678 last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364 pristine_git_object: 3308c483bab521f7fa987a62ebd0ad9cec562c3a docs/models/toolfilechunk.md: id: 67347e2bef90 - last_write_checksum: sha1:0a499d354a4758cd8cf06b0035bca105ed29a01b - pristine_git_object: a3ffaa2b8339ae3a090a6a033b022db61a75125b + last_write_checksum: sha1:2e4c6ce703733c02e62467507c231033716fdb92 + pristine_git_object: d60021755729f1a2870e24a500b3220c8f1fc6e3 docs/models/toolfilechunktool.md: id: eafe1cfd7437 last_write_checksum: sha1:73a31dbff0851612f1e03d8fac3dbbee77af2df0 pristine_git_object: aa5ac8a99a33d8c511f3d08de93e693bf75fb2a1 - docs/models/toolfilechunktype.md: - id: f895006e53e4 - last_write_checksum: sha1:258a55eef5646f4bf20a150ee0c48780bdddcd19 - pristine_git_object: 7e99acefff265f616b576a90a5f0484add92bffb docs/models/toolmessage.md: id: 0553747c37a1 - last_write_checksum: sha1:f35fa287b94d2c1a9de46c2c479dadd5dca7144d - pristine_git_object: fa00d666d6d2baea0aac10fcdeff449eb73c9d39 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 docs/models/toolmessagecontent.md: id: f0522d2d3c93 last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 docs/models/toolreferencechunk.md: id: 10414b39b7b3 - last_write_checksum: sha1:2e24f2331bb19de7d68d0e580b099c03f5207199 - pristine_git_object: 3020dbc96563e2d36941b17b0945ab1e926948f4 + last_write_checksum: sha1:ea3bdfc83177c6b7183ad51fddb2d15aee0f0729 + pristine_git_object: 49ea4ca7b05e5fcaaf914f781e3a28483199d82d docs/models/toolreferencechunktool.md: id: c2210d74792a last_write_checksum: sha1:368add3ac6df876bc85bb4968de840ac578ae623 pristine_git_object: 999f7c34885015a687c4213d067b144f1585c946 - docs/models/toolreferencechunktype.md: - id: 42a4cae4fd96 - last_write_checksum: sha1:43620d9529a1ccb2fac975fbe2e6fcaa62b5baa5 - pristine_git_object: bc57d277a39eef3c112c08ffc31a91f5c075c5a4 docs/models/tooltypes.md: id: adb50fe63ea2 last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c @@ -1478,12 +1426,8 @@ trackedFiles: pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 docs/models/transcriptionsegmentchunk.md: id: f09db8b2273e - last_write_checksum: sha1:5387f2595d14f34b8af6182c34efac4874a98308 - pristine_git_object: 00a599ee8442f45ce4f529da18ad3e9486b12f9f - docs/models/transcriptionsegmentchunktype.md: - id: 01bda77a53f8 - last_write_checksum: sha1:63d511c2bd93bd477f1b7aae52954b28838316d9 - pristine_git_object: 2968fa26a2dd390b66974e6db57317616fb3b832 + last_write_checksum: sha1:d4a7ebd6a8cc512a0bd00a49af4130c533254b44 + pristine_git_object: d7672c0eebb55243965306c94a771aa18ed641d6 docs/models/transcriptionstreamdone.md: id: 
2253923d93cf last_write_checksum: sha1:2a1910d59be258af8dd733b8911e5a0431fab5a4 @@ -1506,68 +1450,44 @@ trackedFiles: pristine_git_object: 63fcfbc63a65cdff4228601e8a46f9d003ec9210 docs/models/transcriptionstreamsegmentdelta.md: id: f59c3fb696f2 - last_write_checksum: sha1:4a031b76315f66c3d414a7dd5f34ae1b5c239b2e - pristine_git_object: e0143a39fb12a4a3efce3e1b250730d20cf21c7d + last_write_checksum: sha1:7d6999abf5a01fc01c0d5302acd3218e535adc9a + pristine_git_object: 1b652a3b6dc4406a3b7efa8a412b15ca0a5d765f docs/models/transcriptionstreamtextdelta.md: id: 69a13554b554 - last_write_checksum: sha1:de31f5585d671f85e6a9b8f04938cf71000ae3f7 - pristine_git_object: a4062171d7630bcea967a89d8df6cffd4908285f - docs/models/unarchiveftmodelout.md: - id: 4f2a771b328a - last_write_checksum: sha1:0b9ab5d6c7c1285712127cfac9e918525303a441 - pristine_git_object: 12c3d74534897129766397a44afee0f4dac91d9f - docs/models/unarchivemodelrequest.md: - id: e6922871c93a - last_write_checksum: sha1:591461141df5089e884a2db13bfaaef1def0748c - pristine_git_object: 033dad8a66969e2b920ec40391c38daa658c6f0e + last_write_checksum: sha1:d969f462034ed356f2c8713b601ee7d873d4ce07 + pristine_git_object: 77bd0ddcf8a1d95707fa9e041de3a47bb9e7f56d + docs/models/unarchivemodelresponse.md: + id: a690f43df567 + last_write_checksum: sha1:5c9d4b78c92d30bb4835cb724d1ea22a19bf5327 + pristine_git_object: 375962a7110f814288ea9f72323383bd8194e843 docs/models/updateagentrequest.md: id: 371bfedd9f89 - last_write_checksum: sha1:f9ebaa4650f77595fd554bb2711d4b869cba06cc - pristine_git_object: 358cb71d2ab7dfae85ac7768936910a976d2f644 - docs/models/updateagentversionrequest.md: - id: 706f66fb34eb - last_write_checksum: sha1:913a8105b77620d32147a00c1223ce5a117d2df2 - pristine_git_object: b83eb867a518d757b23d981c962f87a0e9c8a454 + last_write_checksum: sha1:97170995ed40391023f0dce5096cfebe83fa7dc8 + pristine_git_object: d3428d92a8f23670a6b587a6017a353d2c12a815 + docs/models/updateagentrequesttool.md: + id: bdf961d2c886 + last_write_checksum: sha1:5355f8c97b2aef98aebff251e1f4830ddbaa7881 + pristine_git_object: e358b1edb9035667104700dde890bb0b43074543 docs/models/updatedocumentrequest.md: id: ee4e094a6aa7 - last_write_checksum: sha1:4798ef091b5d045b0cda3d2a3cc40aef0fb3155c - pristine_git_object: fa5d117a4016208d81ad53f24daa4284b35152f8 - docs/models/updateftmodelin.md: - id: 1b98d220f114 - last_write_checksum: sha1:d1c7a8f5b32228d8e93ad4455fccda51b802f08f - pristine_git_object: 4e55b1a7d96e1ad5c1e65c6f54484b24cd05fcfc + last_write_checksum: sha1:4c4d774c67449402eb7e1476b9d0fef5b63f2b99 + pristine_git_object: 7e0b41b7be9f559b27a3430f46ed53d0453f6e03 docs/models/updatelibraryrequest.md: id: 2eda82f12f31 - last_write_checksum: sha1:cc1ca5b6f9bd4ab61e3983991f5656ff5ea22e8d - pristine_git_object: e03883cca75f3ed17fa3432e0abc2c892ec3d74a + last_write_checksum: sha1:436e08988daa8ca04ece36a4790ed84e0629b81a + pristine_git_object: aaffc5a9f0d588ff935db2ec2c079af9f162c2c3 docs/models/updatemodelrequest.md: id: 8eabdced3e0e - last_write_checksum: sha1:28765fe537adb34e5e2ef051cd1226bdcae8ea9f - pristine_git_object: 5799c63babcd9377c5024f584328c814c4401c04 - docs/models/updatemodelresponse.md: - id: 742d796d5be3 - last_write_checksum: sha1:2e09ab747fa3247486b25057e887baf0859c3a5b - pristine_git_object: 275ee77f111b926d681a446af9741001a1c88fa8 - docs/models/updateorcreatelibraryaccessrequest.md: - id: c95e6b3df38f - last_write_checksum: sha1:f957324978f18d9831dafe4d1a5d78f755f51ed6 - pristine_git_object: e04567b40d62e0d705096eedaba9fa84913f584d - 
docs/models/uploaddocumentrequest.md: - id: a211b5f814e4 - last_write_checksum: sha1:ce851cd52da0250c8d86f1346778edb0b5c97a50 - pristine_git_object: 92152b7f247ae4d7f8373e8b13ce947b7ca2cae7 - docs/models/uploadfileout.md: - id: c991d0bfc54c - last_write_checksum: sha1:ce5af8ffadb8443a6d1ca5fbbc014de42da35b9d - pristine_git_object: 6f09c9a6920f373c730fa3538b0c2953d757c257 + last_write_checksum: sha1:96879df11c005b591f2e59975897feff8fc8656e + pristine_git_object: 56b84c59c48ac135345394235c71ce77d384e33e docs/models/usageinfo.md: id: ec6fe65028a9 last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 docs/models/usermessage.md: id: ed66d7a0f80b - last_write_checksum: sha1:627f88dbb89e226a7d92564658c23a0e8d71342a - pristine_git_object: 78ed066eed9f0638edc6db697eaeaad6f32b4770 + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 docs/models/usermessagecontent.md: id: 52c072c851e8 last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a @@ -1584,82 +1504,82 @@ trackedFiles: id: ba1f7fe1b1a3 last_write_checksum: sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf pristine_git_object: c73952d9e79ea8e08bc1c17817e74e3650def956 - docs/models/wandbintegrationout.md: - id: c1a0f85273d8 - last_write_checksum: sha1:ce7ffc6cc34931b4f6d2b051ff63e1ca39e13882 - pristine_git_object: a6f65667a6bcfb18b78f8f766ab71de84ca13ca7 + docs/models/wandbintegrationresult.md: + id: 729c2601b338 + last_write_checksum: sha1:49f442907815de4661a85a3521803d80b953a17e + pristine_git_object: d12bc31191ba534a9744d78f657c19e7f93f777a docs/models/websearchpremiumtool.md: id: 267988aa8c3f - last_write_checksum: sha1:f9b761d727cbe0c60a2d0800b0a93929c5c3f5e7 - pristine_git_object: 07b8b9265e01bd28b1c30fbc3f1283285e7d6edd + last_write_checksum: sha1:38f80a43f73a13ddedc7730f853c092a48b665f9 + pristine_git_object: 78b736cd314617caa0d77f3c42015212e37ab539 docs/models/websearchtool.md: id: fc4df52fb9b5 - last_write_checksum: sha1:047fd9f950d5a86cf42a8f3ac40f754b395e39ec - pristine_git_object: da5e7b7b600fa3fd0799e95e7a0f9507cd8456c3 + last_write_checksum: sha1:72636dc7ae74264bb5158d284ef6f83da5290b27 + pristine_git_object: 4ca7333c412ad819e3e02c61debe402e3f9b0af9 docs/sdks/accesses/README.md: id: 2ea167c2eff2 - last_write_checksum: sha1:200d509484a1a27fec893e15c39043a9deb140da - pristine_git_object: c1e3866d1a37e1596fa61538317eb68907cbaf57 + last_write_checksum: sha1:279d3b3a4f625b89b25e9a2a47886ac6008b3ca0 + pristine_git_object: c50456df9ea2bb71f78a83ad28f90e089d2e2cd7 docs/sdks/agents/README.md: id: 5965d8232fd8 - last_write_checksum: sha1:a655952f426d5459fa958fa5551507e4fb3f29a8 - pristine_git_object: cd3ec4c6c87f34c4d3634bf510534dff163d97de + last_write_checksum: sha1:a73ae6719acef32b47be55ea5c5684e91f7eda68 + pristine_git_object: 8a60837030b9e5dd0adca0d07d9f0266158b080f docs/sdks/batchjobs/README.md: id: a3b8043c6336 - last_write_checksum: sha1:eca07f3c47acbe42264d31fba982a49005a8c983 - pristine_git_object: 24316d78b1be51649d186db1479bbf74f00f87e6 + last_write_checksum: sha1:b4b3123ff210545048e2b0c729f2b7e5f7460f4e + pristine_git_object: 3633fe4ee136c1ac90f9446425f62a0d68fa4f90 docs/sdks/betaagents/README.md: id: 5df79b1612d8 - last_write_checksum: sha1:f2dbb543e7bd1db239ee801c55fa1f7f92ca6322 - pristine_git_object: 0ef655a348d7381aa0a7869a022b362d90497197 + last_write_checksum: sha1:9ec1c7a967bc653fe175a7986ddec74d5feb0714 + pristine_git_object: 
aaa5110e6db30f5450877b67d70d46e53b98996b docs/sdks/chat/README.md: id: 393193527c2c - last_write_checksum: sha1:908e67969e8f17bbcbe3697de4233d9e1dd81a65 - pristine_git_object: 6907c29d26b51fa7748b339cc73fd3d6d11a95a5 + last_write_checksum: sha1:5e7a43def5636140d70a7c781ed417e527ce9819 + pristine_git_object: 1bf4aeadc762f5d696c278eefaa759f35993e9d5 docs/sdks/classifiers/README.md: id: 74eb09b8d620 - last_write_checksum: sha1:f9cc75dbb32ea9780a9d7340e524b7f16dc18070 - pristine_git_object: 41b520812ac8a6031c0ab32aa771e9903fa24a97 + last_write_checksum: sha1:9f11740f8cf1a3af44fff15b63916305f1882505 + pristine_git_object: dc0f4984380b5b137266421e87a1505af5260e89 docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:55b150757576819887075feac484ba76ae8abd59 - pristine_git_object: c0089f12b040f3686a584f1569ed4e0ab56c52fb + last_write_checksum: sha1:4c5f8ea93d560956cb23c26e0d5f6d7cbc129e07 + pristine_git_object: e77d329b735dc21f620470bcf82220a79bc34e18 docs/sdks/documents/README.md: id: 9758e88a0a9d - last_write_checksum: sha1:55280d8863200affd25a98d7493a0110c14baad3 - pristine_git_object: 97831f86223c6dbbaec35a240725a8c72e229961 + last_write_checksum: sha1:ac7ab2598066971e8b371a3e73aa266ec697df1b + pristine_git_object: 9c219b6709d5d5bfa28113efca92012e8c5a5112 docs/sdks/embeddings/README.md: id: 15b5b04486c1 - last_write_checksum: sha1:46e57c7808ce9c24dd54c3562379d2ff3e0526e8 - pristine_git_object: 0be7ea6dcace678d12d7e7e4f8e88daf7570df5d + last_write_checksum: sha1:76cb4876eebccfd2ab9a10a1b25570477a96a5c1 + pristine_git_object: eecb5c9e991dcd2fd5c1f0688efe3b64b4c6de3b docs/sdks/files/README.md: id: e576d7a117f0 - last_write_checksum: sha1:92558cd6688432150cc433391e2b77a328fa3939 - pristine_git_object: ae29b7bf9383f534b2ca194ec5ff261ff17b5fb6 + last_write_checksum: sha1:f5861c42227b901742fd8afe7155ed6d634b1b4c + pristine_git_object: 9507326be83eaf750daa12c0b1421d819b72340d docs/sdks/fim/README.md: id: 499b227bf6ca - last_write_checksum: sha1:34ff7167b0597bf668ef75ede016cb8884372d1b - pristine_git_object: 3c8c59c79db12c916577d6c064ddb16a511513fd + last_write_checksum: sha1:5b2ce811df8d867d14fe0126f2c9619cca779f56 + pristine_git_object: 49151bf5be49ce6554679bc5c30906894a290ecb docs/sdks/finetuningjobs/README.md: id: 03d609f6ebdd - last_write_checksum: sha1:206624c621a25836333f4c439e0247beb24a7492 - pristine_git_object: fe18feeb640804d9308e6fefe9b5f2371d125f9b + last_write_checksum: sha1:2d7ff255c1462d5f1dff617a1993e730ec3911ea + pristine_git_object: 4262b3a9833180ce86da43a26ee7ab27403f2cd0 docs/sdks/libraries/README.md: id: df9a982905a3 - last_write_checksum: sha1:1c623647aa7b834a844e343c9e3fe0763c8445a5 - pristine_git_object: 8835d0ec8cbabcb8ab47b39df982a775342c3986 + last_write_checksum: sha1:e3eb0e9efb3f758fdf830aa1752c942d59a4f72b + pristine_git_object: 7df1ef4e26449af572412f052ee7ad189039544f docs/sdks/models/README.md: id: b35bdf4bc7ed - last_write_checksum: sha1:2410579fd554ad1e5734cc313d0a75eeb04a1d14 - pristine_git_object: 0cbf1bdde52d1a52c1329ecd1116718237be5152 + last_write_checksum: sha1:2aa91ffe637c049aed0d63d24ac39688b6ecb270 + pristine_git_object: 311a2db6e213902ac5a2c27acf19f856dae2c264 docs/sdks/ocr/README.md: id: 545e35d2613e - last_write_checksum: sha1:a8d22a86b79a0166ecec26a3e9379fa110d49b73 - pristine_git_object: 9fd9d6fc14c5874dbb819239ea677a171a26969b + last_write_checksum: sha1:da377d75b6b7480c335d7f721bb06fe11492be38 + pristine_git_object: fde2a82339e10c74aca6d1b4168b62501d7bbf83 docs/sdks/transcriptions/README.md: id: 089cf94ecf47 - 
last_write_checksum: sha1:493070fcce7cec1a627b04daa31c38a6745659e7 - pristine_git_object: 9691b81d3a7eb27d7b2b489408d32513859646c9 + last_write_checksum: sha1:15d118796f147bc5b0bf4146ba39bfa9edfbc996 + pristine_git_object: 97703c9b4dc942385ee04ae96cbd100c3f632a17 py.typed: id: 258c3ed47ae4 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -1670,8 +1590,8 @@ trackedFiles: pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 src/mistralai/client/__init__.py: id: f1b791f9d2a5 - last_write_checksum: sha1:fcca936cb62cc76d57372d5bd5735877b79b53a4 - pristine_git_object: 481fc91604c413966c8510d8341edaa3355fc276 + last_write_checksum: sha1:c05dc9845d3361c4aae7796b079ac0e7952e8606 + pristine_git_object: 4b79610a3fc8222fc8f9adeeaf798e894708fc06 src/mistralai/client/_hooks/__init__.py: id: cef9ff97efd7 last_write_checksum: sha1:9a6f060871150610f890cc97676c3afe9050b523 @@ -1686,156 +1606,236 @@ trackedFiles: pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:dd6d1521f7ecfc56be58eafc1709873a04d27fb0 - pristine_git_object: 814d9ec74a37ae50f106ea07b3c174e65685521b + last_write_checksum: sha1:03563b818feb27386f7d6a0321a3875e3024a2d2 + pristine_git_object: 1a4d15d66f45d13c7f9cae550138390b5cf5897e src/mistralai/client/accesses.py: id: 76fc53bfcf59 - last_write_checksum: sha1:16574ca54176ec30b236ab1a4694f57a6314db43 - pristine_git_object: cda484c8feade66829dad587f5f397aa89d4fb6f + last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e + pristine_git_object: 0761b0bc6080ab0d891be70089a1908d435559fa src/mistralai/client/agents.py: id: e946546e3eaa - last_write_checksum: sha1:3b46ac68d37563a9eb988ad2978083e40cf4513d - pristine_git_object: 0942cb20173f0b2e3f828f5857e3aa221f65bc1b + last_write_checksum: sha1:7049cab7c308888c88b0341fb29f0132e154e3cb + pristine_git_object: 2b70d1520663d999773159d89b1f9dc96f7fbf97 src/mistralai/client/audio.py: id: 7a8ed2e90d61 last_write_checksum: sha1:e202d775d24c0303053e0548af83fcb04e2748f4 pristine_git_object: f68f063c08a099d07904456daa76d8e2d2ecdbe6 src/mistralai/client/basesdk.py: id: 7518c67b81ea - last_write_checksum: sha1:795253524d0911d227b934978bdacb84619177a3 - pristine_git_object: 611b40597b42ac309871681b38a3b3c249cbe494 + last_write_checksum: sha1:2cea76931db51175b2c787d0c707f08e9944c22f + pristine_git_object: a976121bd224d64497e5006cb58dd728f6a67144 src/mistralai/client/batch.py: id: cffe114c7ac7 last_write_checksum: sha1:b452983f67b33f26e1faa60fdbbb171cb1877224 pristine_git_object: 7e36fd0d73ebeb873f74f4109896a6cf3bb7d2ba src/mistralai/client/batch_jobs.py: id: 3423fec25840 - last_write_checksum: sha1:eb1baade19f5da3dd815ebfbabccca139eb7b25d - pristine_git_object: 752c76524a4fa19ed1654943218ca5182d563ca3 + last_write_checksum: sha1:34de0e986e7c0e4377f70125d319e522280c565f + pristine_git_object: 0e135b30cd122d1a813ee67bf2f9037953448e73 src/mistralai/client/beta.py: id: 981417f45147 last_write_checksum: sha1:85f42fc6c2318eef94c90405b985120220c9c617 pristine_git_object: 65b761d18f7274cc33162a83efa5b33211f78952 src/mistralai/client/beta_agents.py: id: b64ad29b7174 - last_write_checksum: sha1:227c2ef3812c06e4a813063bf9d2282ce0884ecd - pristine_git_object: 4e692f17579635d5f0cc03f86b8158b3344ae87f + last_write_checksum: sha1:7c900a6b1483108a367050440667c069b08fbb92 + pristine_git_object: 157c5de4c66273e6df468f8a12b4399f9efb32fb src/mistralai/client/chat.py: id: 7eba0f088d47 - last_write_checksum: sha1:6f052ac3117829b16906a4e1cbfa5b1f7ab104fd - 
pristine_git_object: 35698d32ac870f4b59c03f02700f20c04b14462d + last_write_checksum: sha1:520b0da011d63c60bd0d3a960a410a8f4a6a3e22 + pristine_git_object: 13b9c01f035c4fd6f60b78f20a1801bedf3b582b src/mistralai/client/classifiers.py: id: 26e773725732 - last_write_checksum: sha1:abd5033ee390fdeddfa4af918cc44f6210a2a6a0 - pristine_git_object: 3407c4b77db429535465f29754a2da8145d6a5fe + last_write_checksum: sha1:ee94a4e50cda893f9c19c2304adda8b23fc2de9e + pristine_git_object: 67199b601e38dff6fc6a4317eb845fbde6c25de0 src/mistralai/client/conversations.py: id: 40692a878064 - last_write_checksum: sha1:6e81283d3d5db5dd554af68d69313951cf5f4578 - pristine_git_object: 646b91f3980bbe9be01078162d5b4ad9afb141b9 + last_write_checksum: sha1:1101b9e374010ba9cb080c30789672cfcfc45c55 + pristine_git_object: ec33b1fb12d1923ef5f686ed09c5fe5ae889e40c src/mistralai/client/documents.py: id: bcc17286c31c - last_write_checksum: sha1:9ae89ef80a636b55ba4cdc3ad6c77c47c1824433 - pristine_git_object: c78f2944edaac77864ff6c4dd8d19d3aab3f0cb6 + last_write_checksum: sha1:37669f51eba1b352a5e3c7f3a17d79c27c7ea772 + pristine_git_object: b3130364c0f3cc90ed1e4407a070bd99e3cce606 src/mistralai/client/embeddings.py: id: f9c17258207e - last_write_checksum: sha1:7cd6d848ed8978637988d9b7e1a7dd92dac5eb3b - pristine_git_object: 4a056baa014217927412e9dd60479c28de899e2e + last_write_checksum: sha1:0fbf92b59fde3199c770a522ead030f8fa65ff5c + pristine_git_object: 5f9d3b9cb611943e509caeda9ddd175e3baee2c3 + src/mistralai/client/errors/__init__.py: + id: 0b2db51246df + last_write_checksum: sha1:0befddc505c9c47388683126750c7ad0e3fbef52 + pristine_git_object: 58a591a1cc2896f26df2075ffca378ca6c982d1e + src/mistralai/client/errors/httpvalidationerror.py: + id: ac3de4a52bb6 + last_write_checksum: sha1:73251adb99a07d11b56d0bc0399a2362ff9ccdba + pristine_git_object: 97b165629c39ab6e24406eb3f13970414b73f8f7 + src/mistralai/client/errors/mistralerror.py: + id: d1f57f0ff1e9 + last_write_checksum: sha1:30065cdd7003ec02cb3463d7c63229c4ff97503c + pristine_git_object: eb73040c5b5251018695204fde80eac914b35dae + src/mistralai/client/errors/no_response_error.py: + id: 8b469ecb0906 + last_write_checksum: sha1:0b3fdb1136472c41a4a739a5cbf9e2a4ce0c63a4 + pristine_git_object: d71dfa7b24146f1390ac6830e61acf337b99ca83 + src/mistralai/client/errors/responsevalidationerror.py: + id: 6cfaa3147abe + last_write_checksum: sha1:6862d178d4d1964bc03db47b76709aa406546981 + pristine_git_object: a7b3b9f0207846b5f176076b9f400e95cb08ebb9 + src/mistralai/client/errors/sdkerror.py: + id: c489ffe1e9ca + last_write_checksum: sha1:f708168e46c2960dd51896083aee75ccdb36f9dd + pristine_git_object: 25b87255a51021079f8ba5cc60b43509e12f9a4d src/mistralai/client/files.py: id: f12df4b2ce43 - last_write_checksum: sha1:aa647afa486bbed48083c0b1ec954bdc5cfd0280 - pristine_git_object: 57d389f1e245f5768fe9e8991f65229dd4bd608d + last_write_checksum: sha1:a16c8702d15339200b09c62948c06f79e720d79c + pristine_git_object: a5f3adf6dd9b60a202c70edf7d2a148a626ce471 src/mistralai/client/fim.py: id: 217bea5d701d - last_write_checksum: sha1:90cacb025a1a1fb81e619d59819c0a652f4a5efa - pristine_git_object: be3f7742b866ac58b7bbb65e3593e9865dee134f + last_write_checksum: sha1:dc427c9e954dfb9a7fe2df8b5c544877a28cdc73 + pristine_git_object: 8ffb7730a03398322dfdd6c83724096d4924c5c5 src/mistralai/client/fine_tuning.py: id: 5d5079bbd54e last_write_checksum: sha1:fe1f774df4436cc9c2e54ed01a48db573eb813cd pristine_git_object: df6bc5643a13294ddfbeecc6ae84d00cd7199bed src/mistralai/client/fine_tuning_jobs.py: id: fa1ea246e0b2 - 
last_write_checksum: sha1:edfe25f99047d4cbd45222cd23823c782286a2c8 - pristine_git_object: 9a28ded152a4f4a5b625a97e087aebc5a287d71e + last_write_checksum: sha1:8cbf3827f5c2e43170192de39be498af0bf24cf0 + pristine_git_object: c2ee871bb1ccf7e3e24081121a7e54f1483eee5c src/mistralai/client/httpclient.py: id: 3e46bde74327 last_write_checksum: sha1:0f4ecc805be1dc3d6e0ca090f0feb7d988f6eb9d pristine_git_object: 544af7f87d6b7097935290bebd08e30e5f485672 src/mistralai/client/libraries.py: id: d43a5f78045f - last_write_checksum: sha1:b3fd0348f4f56aab9873d09c45ed9575baf6e7c3 - pristine_git_object: 26ceabe19a340b7fd4dbb74aebab62bc45093ae5 + last_write_checksum: sha1:6440b3df71fe557ecba5c23768d115efd4ceb26f + pristine_git_object: b8728362b87349118ac6f163f50613dd18c43340 src/mistralai/client/models/__init__.py: id: e0e8dad92725 - last_write_checksum: sha1:d047eab2a2a8ee5af65ed19055a0a3e3092ad2c5 - pristine_git_object: 093ffcbdb0b57458cf856f585e6637d7d5955e8d + last_write_checksum: sha1:50727667552480e8298431f5a3dcc78457c53331 + pristine_git_object: 5ef8b3f3dd9fbb32d4675f7e11808c29fc218c57 src/mistralai/client/models/agent.py: id: 1336849c84fb - last_write_checksum: sha1:d41a96558ddbd52b6c71d316c291847bb6131a01 - pristine_git_object: 05ae24cde5149e30004b7cd4a2409c753682be56 + last_write_checksum: sha1:6090ddf2b5b40656dfbf3325f1022a40ae418948 + pristine_git_object: 686a6eb84ecd27e725e3773b3f7773dddac1c10c src/mistralai/client/models/agentaliasresponse.py: id: 3899a98a55dd last_write_checksum: sha1:d7e12ea05431361ad0219f5c8dee11273cd60397 pristine_git_object: 6972af2a4ae846e63d2c70b733ecd6c8370ee0cd src/mistralai/client/models/agentconversation.py: id: 1b7d73eddf51 - last_write_checksum: sha1:bc2f1a3710efc9c87d6796ccce953c9ce9cf3826 - pristine_git_object: a850d54c64de0c84ad4ea2b11ea1a828eb2580c4 - src/mistralai/client/models/agentcreationrequest.py: - id: 35b7f4933b3e - last_write_checksum: sha1:d3f61940b4cccfc9c13860844f4115e60b095823 - pristine_git_object: 898d42a9c16ffe893792e14445e9ebfcbd046ba3 + last_write_checksum: sha1:28718fb00dbe74241712b4f7a3fbce2d060f7e86 + pristine_git_object: da30c6634294cdaba459b68ca8877d867ee052fb src/mistralai/client/models/agenthandoffdoneevent.py: id: 82628bb5fcea - last_write_checksum: sha1:537e9f651de951057023d3712fa1820da17a21b4 - pristine_git_object: 40bf84970e1d245c3c7fbad64d73f648f8287438 + last_write_checksum: sha1:829c5a152e6d737ffd65a3b88b0b2890e6703764 + pristine_git_object: e2609e3d1fb62b132eb53112eb2bdc4ae855085f src/mistralai/client/models/agenthandoffentry.py: id: 5030bcaa3a07 - last_write_checksum: sha1:afe800c64c74aa79fceda4e4ce808f67573edbc7 - pristine_git_object: b18fe17c70d561b926bdac04124ebca8fc1cca0b + last_write_checksum: sha1:c9544755ad6d3a3831f8afe446c6a9a523eb5137 + pristine_git_object: f92ef2cc7310d5df94436f3067a640d3848405f0 src/mistralai/client/models/agenthandoffstartedevent.py: id: 2f6093d9b222 - last_write_checksum: sha1:933f8be5eacd86881a42cfb83612f327caa77ee7 - pristine_git_object: e278aef39d3bc5e158a094c593391fa8ad77c320 + last_write_checksum: sha1:c9f86e01497c53f3c1806dbb9fdff6e2d9993323 + pristine_git_object: 2a4023419212fec8b3f0e83d506a25b17408a8b1 + src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py: + id: 23a832f8f175 + last_write_checksum: sha1:237d6b4419615c9c26f96d49760732bd7b4617e7 + pristine_git_object: 04761ae786c35e6fa6cd5a896a5e52458cb3a5d5 + src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py: + id: 9c9947e768d3 + last_write_checksum: sha1:385faebecef8479d1a72a7ab6f15ddcc611dad87 + 
pristine_git_object: 291a9802a7d49108fc0d428610cb4c37b42f0796 + src/mistralai/client/models/agents_api_v1_agents_deleteop.py: + id: 95adb6768908 + last_write_checksum: sha1:f222a61a73ba2f37051fffbf2d19b3b81197d998 + pristine_git_object: 5e41fdcdbf182e993acd71603ecb8c9a14e48043 + src/mistralai/client/models/agents_api_v1_agents_get_versionop.py: + id: ef9914284afb + last_write_checksum: sha1:c99ee098f659a56cb365c280cc29de441916b48a + pristine_git_object: 941863d0f8143020200bb5566ce66d527c4369c8 + src/mistralai/client/models/agents_api_v1_agents_getop.py: + id: f5918c34f1c7 + last_write_checksum: sha1:b90285965e2aaccaf989e59b8f1db4a53ae8b31c + pristine_git_object: dd17580dd0041a979fc6c9c7349d14a3e200f5d3 + src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py: + id: a04815e6c798 + last_write_checksum: sha1:b4b5c4e8566f1d0c68a14aba94b7ffea257fd7ce + pristine_git_object: bb1da6020386fabfbd606db9a098a0e9323ce3b0 + src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py: + id: 19e3310c3907 + last_write_checksum: sha1:6628e9ff747c579e11fa9a756cee3b11c57c476d + pristine_git_object: 54b62e90e23c1782a0b068460d6877cac3b28916 + src/mistralai/client/models/agents_api_v1_agents_listop.py: + id: 25a6460a6e19 + last_write_checksum: sha1:0abe889b85470b28917368a2b958a13303bd38f1 + pristine_git_object: 97b1c7f1a070be5e12e1a32ad56dbcfcb0f1cd68 + src/mistralai/client/models/agents_api_v1_agents_update_versionop.py: + id: 63f61b8891bf + last_write_checksum: sha1:e9046cf75e008e856f00dda8725cbb16d83cd394 + pristine_git_object: 5ab821ea413d656dc7194f3588c8987c3e720831 + src/mistralai/client/models/agents_api_v1_agents_updateop.py: + id: bb55993c932d + last_write_checksum: sha1:bc922e15651d7bb33b841d9b3ae247843b6a5426 + pristine_git_object: 69da5001007916e458cab6caf8c10073c8fbc7d6 + src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py: + id: ec00e0905f15 + last_write_checksum: sha1:d0a253c2f383241378e6fab35a38427d0a1dd827 + pristine_git_object: d257dc789cdc4f57bb91d1788335d2d49442d02f + src/mistralai/client/models/agents_api_v1_conversations_appendop.py: + id: 39c6125e850c + last_write_checksum: sha1:864ece4ddcd65075547daa1ab996ba7cfe9939fc + pristine_git_object: 61fec0834e6e05a56a7ee5c984fb0401f9c72f5c + src/mistralai/client/models/agents_api_v1_conversations_deleteop.py: + id: 0792e6abbdcb + last_write_checksum: sha1:9725fce86a52b4995a51e1995ca114c0c4b414df + pristine_git_object: 499645a77782e29db61e439060340fee787799c1 + src/mistralai/client/models/agents_api_v1_conversations_getop.py: + id: c530f2fc64d0 + last_write_checksum: sha1:241e5a07f37fa88f1e5011615b3e2b47a1aaf6a7 + pristine_git_object: 504616abbf0c9d0595f2aae81c59e52352cee323 + src/mistralai/client/models/agents_api_v1_conversations_historyop.py: + id: 2f5ca33768aa + last_write_checksum: sha1:fccc3e1a3f48eff31463829037a440be667a7da1 + pristine_git_object: ef0a4eb084de52d4bde435ee9751aaa12e61dcc3 + src/mistralai/client/models/agents_api_v1_conversations_listop.py: + id: 936e36181d36 + last_write_checksum: sha1:e3e52cf7967b9b78099db9449cb33e3ded34d111 + pristine_git_object: 8bf66aea23f16734c1f9e03629aaf7246e4e60b4 + src/mistralai/client/models/agents_api_v1_conversations_messagesop.py: + id: b5141764a708 + last_write_checksum: sha1:17fd503da7fb20198792c6e25f94dcc0a1e5db05 + pristine_git_object: 19978a194e2dd633fe89bcee7ceac177fcdd6629 + src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py: + id: c284a1711148 + last_write_checksum: sha1:2e462249c8ab71376c5f6179a2c033e254165f3e + 
pristine_git_object: 63c744498dfbdd254f2e780d90a680b10100ee63 + src/mistralai/client/models/agents_api_v1_conversations_restartop.py: + id: 3ba234e5a8fc + last_write_checksum: sha1:5dd06d300dbe8832b72d868657dc4c58f0ebaad5 + pristine_git_object: 3186d5df9000d4a62c0fbc64a601e6b709803deb src/mistralai/client/models/agentscompletionrequest.py: id: 3960bc4c545f - last_write_checksum: sha1:ee1e60d894d3a9277c1a3970c422483ffa502e21 - pristine_git_object: f4a2d646927c8c0f250507f52c5e7515830759ad + last_write_checksum: sha1:5d81a0421184ed547208e8ea7cff47b18fc00788 + pristine_git_object: 6955f6acb023fd842d9ec46a694d270a66911c0e src/mistralai/client/models/agentscompletionstreamrequest.py: id: 1b73f90befc2 - last_write_checksum: sha1:3bc4976eeda6d9b30bba72e7f7c417ca9ba885c5 - pristine_git_object: 732e2402190d40bc5360868d3048d57fff9e7b55 - src/mistralai/client/models/agentupdaterequest.py: - id: 2d5a3a437819 - last_write_checksum: sha1:4a0ef549756904749a36b580cc2296a6a54d6647 - pristine_git_object: 96e209d41b638002f129ec4c13748082ccc3a8db + last_write_checksum: sha1:b46298a653359bca205b6b1975bcd1909e563dff + pristine_git_object: c2cf35522236f29ca1b9f2a438dfc79a59ca3e2a src/mistralai/client/models/apiendpoint.py: id: 00b34ce0a24d last_write_checksum: sha1:733e852bf75956acd2c72a23443627abfa090b7b pristine_git_object: a6665c1076f05c28936510c24ee7d3498d7e7a24 - src/mistralai/client/models/appendconversationop.py: - id: 1c47dd1e7c7e - last_write_checksum: sha1:109ced509e3caa5e5c9610b3a18839d113be708a - pristine_git_object: 710b8e1ca3fbfbb747e48d7699588bc199a41274 - src/mistralai/client/models/appendconversationstreamop.py: - id: 1ab08b189e9d - last_write_checksum: sha1:edd2a91da29f83646538b57e4d29f006d31f9dec - pristine_git_object: 55efca0e32c5d54d100563500aee9b61952d07c2 - src/mistralai/client/models/archiveftmodelout.py: - id: bab499599d30 - last_write_checksum: sha1:92f5b5a89ae5e52523d30069629e0ac8dc858d6b - pristine_git_object: 3107116c4a2c78c591999b220349325612a19b4e - src/mistralai/client/models/archivemodelop.py: - id: beefa1df3b7c - last_write_checksum: sha1:6f78b2f84f42267d4928a5a3ad1d3d3cae417cac - pristine_git_object: 30b4a9bd71f349cc4ab4b12df73770d327008527 + src/mistralai/client/models/archivemodelresponse.py: + id: 2d22c644df64 + last_write_checksum: sha1:d0f67fd2bc5a6e8de4f2b0a5742ceb4a1f7f5ab8 + pristine_git_object: f1116850c8bf0159c4146d4973988ea5d0fe7de7 src/mistralai/client/models/assistantmessage.py: id: 2b49546e0742 - last_write_checksum: sha1:a58ecb7bc381af02d83247f0518a3d34013b4575 - pristine_git_object: 5a4a2085e838196d3ab2b1c00bbeb7a78516dfb2 + last_write_checksum: sha1:dcfa31c2aac95a0d7bd748e96bd87a5c85c0d1f6 + pristine_git_object: 26a778c70439d21b890e85f2c85dbb560e8bffef src/mistralai/client/models/audiochunk.py: id: ce5dce4dced2 - last_write_checksum: sha1:8218d4c7118f677f16a3a63c55348c40d3ab3156 - pristine_git_object: a51868279b9b4ce2d97990286512d69f8d7f2e82 + last_write_checksum: sha1:d3c2e28583d661a9583c40c237430a1f63ea7631 + pristine_git_object: 68866cd2c3c640cf56258f2f98b8a2385ea6fcdb src/mistralai/client/models/audioencoding.py: id: b14e6a50f730 last_write_checksum: sha1:92ca06dce513cd39b2c7d9e5848cf426b40598ce @@ -1846,84 +1846,68 @@ trackedFiles: pristine_git_object: fef87ae76b31128ebd5ced4278e274c249181c23 src/mistralai/client/models/audiotranscriptionrequest.py: id: e4148b4d23e7 - last_write_checksum: sha1:6d7b267bc241c1f72b5b7839d6e2ad76a4c1ecff - pristine_git_object: 8c47a83cada33d8dbd4a9ffdedb55d3f4f55dadf + last_write_checksum: 
sha1:a6ef85be4ae24aa79c8c3fa9dcaf055e0ba9b266 + pristine_git_object: fe4c79e3427fae3e022bd936236d2934eaa76b60 src/mistralai/client/models/audiotranscriptionrequeststream.py: id: 33a07317a3b3 - last_write_checksum: sha1:66ae6146b9b75600df720054ec2c21e4e996b1fe - pristine_git_object: a080cee24c0d67c38fc6785c440418580e097700 + last_write_checksum: sha1:6e648ff58a70a0a3bd63a24676122b80eba4baf7 + pristine_git_object: 2d1e9269b51d84cd8b21643fe04accd00839b013 src/mistralai/client/models/basemodelcard.py: id: 556ebdc33276 - last_write_checksum: sha1:94871ce94c92fbbcff9fa5d6a543c824b17ee13b - pristine_git_object: 17a3e5c93339082f408f9ab5f34b5a01e24c74e0 + last_write_checksum: sha1:e2c3d1effee5b434fea9b958c0dd54fa96143924 + pristine_git_object: 9c9e9a2045a10f4606f11ee5886a19ccf03bbf0e src/mistralai/client/models/batcherror.py: id: 1563e2a576ec - last_write_checksum: sha1:9b59325428680d58151696c1738ad5466d67a78d - pristine_git_object: c1bf722a45c4326f24f7fd34ea536d59c48c67f2 - src/mistralai/client/models/batchjobin.py: - id: 72b25c2038d4 - last_write_checksum: sha1:667020377b2ca85dfd9c5aed96c7d4ba2571093b - pristine_git_object: a0c3b9146130a5ebfdbd0ec6338092bacc01bf85 - src/mistralai/client/models/batchjobout.py: - id: cbf1d872a46e - last_write_checksum: sha1:9031bc5ff1986ddc283551f7f5d210c9de67cc56 - pristine_git_object: 99c2b95118364d660f0cefde16507a83e8c9cafe - src/mistralai/client/models/batchjobsout.py: - id: 20b2516e7efa - last_write_checksum: sha1:426287f6ef9ed88e75f9e318582627d066f9e4f0 - pristine_git_object: f65fc040a964c68c82b5df7d3fb9e40222182322 + last_write_checksum: sha1:51c9e9a4d306c2de45dc0879ade62daed3fc2972 + pristine_git_object: 8a353cd2dc06a8c6f2db3d6b613cfdca8278f57e + src/mistralai/client/models/batchjob.py: + id: 85cd28932cc7 + last_write_checksum: sha1:532a8c6ca8546052159e5e5174cf65ce17a62f3f + pristine_git_object: 80acac336883c23b621d0dc647fef20548bf061a src/mistralai/client/models/batchjobstatus.py: id: 61e08cf5eea9 last_write_checksum: sha1:78934183519948464385245cbc89efb68ac00bfb pristine_git_object: bd77faa2fbed74b19a8d3884af6d43bc1b4806e0 src/mistralai/client/models/batchrequest.py: id: 6f36819eeb46 - last_write_checksum: sha1:115df324d1fec164bae60bf4b37acfa5149b3172 - pristine_git_object: 41c4523456398b302e0b7eb35824efc014f03aa6 + last_write_checksum: sha1:b2a71163e37a9483e172dc13b6320749bee38f2f + pristine_git_object: 911a9a0554b9b8cb6dedcb3a86a06c39890b875e src/mistralai/client/models/builtinconnectors.py: id: 2d276ce938dc last_write_checksum: sha1:4ceb3182009b6535c07d652ccf46661b553b6272 pristine_git_object: ecf60d3c1a83028d9cf755d4c9d5459f6b56e72a - src/mistralai/client/models/cancelbatchjobop.py: - id: cebac10b56a9 - last_write_checksum: sha1:2614180488e51c0e701fffdb058b39892c5bc1e5 - pristine_git_object: cd94ee86467247fe2bc7f7381fa05b57bedabef1 - src/mistralai/client/models/cancelfinetuningjobop.py: - id: c9a1b39f0d02 - last_write_checksum: sha1:139d3c443678aeeb8afedea8b2a783210e5ac28c - pristine_git_object: ddd445bb433df9a0f987693d97088d79e5e8c47f src/mistralai/client/models/chatclassificationrequest.py: id: afd9cdc71834 - last_write_checksum: sha1:91f62e46c415a0168442695f61cb30756227ed1a - pristine_git_object: 8b6d07b906c688a3849b8a4576cc10e075a6868f + last_write_checksum: sha1:a29088359142ebd6409f45569168b2096014119e + pristine_git_object: cf2aa78af3ffc747d557422b83551075b83e601d src/mistralai/client/models/chatcompletionchoice.py: id: 7e6a512f6a04 last_write_checksum: sha1:de0281a258140f081012b303e3c14e0b42acdf63 pristine_git_object: 
2c515f6e9a290ebab43bae41e07493e4b99afe8f src/mistralai/client/models/chatcompletionrequest.py: id: 9979805d8c38 - last_write_checksum: sha1:95c0879e52d8b6c1ff389a5dfe1776129c764c00 - pristine_git_object: 4f7d071b5a0b84ef27397b4acaf4a798b6178eb8 + last_write_checksum: sha1:1f0390718ab06126a05e06797ef6af310ccab543 + pristine_git_object: e871bd92733ac400fdfeb2cf4f66fc32a7584103 src/mistralai/client/models/chatcompletionresponse.py: id: 669d996b8e82 last_write_checksum: sha1:97f164fea881127ac82303e637b6a270e200ac5b pristine_git_object: 7092bbc18425091d111ec998b33edc009ff0931b src/mistralai/client/models/chatcompletionstreamrequest.py: id: 18cb2b2415d4 - last_write_checksum: sha1:12e794c89a954702c3d4dccddad9b365331bd996 - pristine_git_object: ec7d2ae131cf5fac7eb618bbe09340ac23d444ef + last_write_checksum: sha1:c197792ed1dd78159ab0b970f8f76087ff2c4d6b + pristine_git_object: b7b2bff138cee9c130fa01d6157d8b6c21ea5a9c src/mistralai/client/models/chatmoderationrequest.py: id: 057aecb07275 - last_write_checksum: sha1:e18a5ae518f5413b1bff45f85f823b60e00ef32a - pristine_git_object: a8d021e8deb2015470765340281789a7fba544aa - src/mistralai/client/models/checkpointout.py: - id: 3866fe32cd7c - last_write_checksum: sha1:5ed4988914acef48854337127c4ca51791de3ab9 - pristine_git_object: 3e8d90e920cd34ff611f5e875c0163e1a4087f6f + last_write_checksum: sha1:7677494c0e36ccbc201384cb587abeb852a1a924 + pristine_git_object: 228e7d26b8b172c3e11f01d4f260bf6e5195b318 + src/mistralai/client/models/checkpoint.py: + id: 1a530d3674d8 + last_write_checksum: sha1:418f08c61b64fa7ffb053c6f5912e211acab1330 + pristine_git_object: c24e433eb4787146620fb48b6d301f51a4db5067 src/mistralai/client/models/classificationrequest.py: id: 6942fe3de24a - last_write_checksum: sha1:c98f6751aeba813b968aaf69c3551972b94da4c8 - pristine_git_object: 903706c31176da4c2ab021b3bcaeb2217ca98f76 + last_write_checksum: sha1:7bd416d4b0e083efbf9324107263027140702ddb + pristine_git_object: 25b6941355cb9629abb9c0f09fb6fd191c56ffa6 src/mistralai/client/models/classificationresponse.py: id: eaf279db1109 last_write_checksum: sha1:64522aa2b0970e86a0133348411592f95163f374 @@ -1932,222 +1916,194 @@ trackedFiles: id: 2445f12b2a57 last_write_checksum: sha1:2b8b9aeadee3b8ffe21efd1e0c842f9094c4ecc7 pristine_git_object: 6c7d6231d211977332100112900ea0f8cdf5d84c - src/mistralai/client/models/classifierdetailedjobout.py: - id: d8daeb39ef9f - last_write_checksum: sha1:1b6dde6554e51d9100f2e50779eff56b3ca07603 - pristine_git_object: bc5c5381d61b6b4945b51dc9836bcc2e7aa66f9f - src/mistralai/client/models/classifierftmodelout.py: - id: 2903a7123b06 - last_write_checksum: sha1:5141a0c29da0739057c52b2345a386c79d6f8f85 - pristine_git_object: 182f4954c2b3f1408cb05eee76e2bf24005b023e - src/mistralai/client/models/classifierjobout.py: - id: e19e9c4416cc - last_write_checksum: sha1:c5daf7e879911ea24fba847a1c12ab9774ebbe98 - pristine_git_object: 03a5b11c46097733d609f3b075b58ef729f230a5 - src/mistralai/client/models/classifiertargetin.py: - id: ed021de1c06c - last_write_checksum: sha1:8a1db343861e4f193a56d4030862c1f3a361d3e1 - pristine_git_object: b250109bd03976c93c571dbbacb1c631acd19717 - src/mistralai/client/models/classifiertargetout.py: - id: 5131f55abefe - last_write_checksum: sha1:304408da049ff4ad17f058267ffaa916ef907dc2 - pristine_git_object: 3d41a4d9c887488e7b08cc9d5d8dcb5b0fd26781 + src/mistralai/client/models/classifierfinetunedmodel.py: + id: 5a9a7a0153c8 + last_write_checksum: sha1:853bf1b3b941ec3aebeb17ac2caf38fa0dd094de + pristine_git_object: 
fbcf5892d7f0a3ed8b3872d71dd95ed3a25463d1 + src/mistralai/client/models/classifierfinetuningjob.py: + id: a244d5f2afc5 + last_write_checksum: sha1:ceb13935702275025284bb77aa8bf5ccf926e19c + pristine_git_object: fb160cf8e16a1b4899f8bb2803b18ba1f55232ce + src/mistralai/client/models/classifierfinetuningjobdetails.py: + id: 75c5dee8df2e + last_write_checksum: sha1:6b3f2f7ca3bd4e089591f5f9c59b7e28a00447f8 + pristine_git_object: 5d73f55ee0f1321fdeeb4db1971e144953e8e27f + src/mistralai/client/models/classifiertarget.py: + id: 2177d51d9dcf + last_write_checksum: sha1:c801dacc31e2d7682285a9a41d8ef38fa2e38fb9 + pristine_git_object: 4d66d789a42a0bc8762998161f1ad801bd8d96d4 + src/mistralai/client/models/classifiertargetresult.py: + id: 19c343844888 + last_write_checksum: sha1:3f5b37de3585cb38a3e41f0ee49dc4b5a33bf925 + pristine_git_object: 8ce7c0ca167b38ebaf1e5fc6393ab56d9f142cfa src/mistralai/client/models/classifiertrainingparameters.py: id: 4000b05e3b8d - last_write_checksum: sha1:4063f78ea65f138578bef4ce8908b04e556cc013 - pristine_git_object: f360eda504f0aa3f60ba6834aab59c1beb648151 - src/mistralai/client/models/classifiertrainingparametersin.py: - id: 4b33d5cf0345 - last_write_checksum: sha1:7764e6e6c5fc58e501c0891d036bbb22a8ddcb07 - pristine_git_object: 85360a7e7ba5212ef9052d3bd5f368ea4e2c4d98 + last_write_checksum: sha1:d7ce2f1017463c52856b973d696c9abecf5f79e3 + pristine_git_object: 14fa4926f8b5b62aa6b5d8864c40d5acf66e7b15 src/mistralai/client/models/codeinterpretertool.py: id: 950cd8f4ad49 - last_write_checksum: sha1:b014008db6ddce4b35aedec70783d74ce1b5cf83 - pristine_git_object: f69c7a5777af16df151589d2c5c8d81de4d28638 + last_write_checksum: sha1:8c3d91805d6c5f5cc9d249216694781faf15ea68 + pristine_git_object: ce14265f6d312c3da52014d2a058b6a730d5c980 src/mistralai/client/models/completionargs.py: id: 3db008bcddca - last_write_checksum: sha1:4c4ba2d39540bbb06fc1c49815fc6a7c8cf40ab2 - pristine_git_object: 918832acf3ea3d324c20e809fcdb1eae2ba3d7fd + last_write_checksum: sha1:e3d36235610c0546d8a2f2bb0a1db0f953747d88 + pristine_git_object: ab5cf5ff2d4df92d00664803f9274696ae80216d src/mistralai/client/models/completionargsstop.py: id: 5f339214501d last_write_checksum: sha1:744878976d33423327ea257defeff62073dad920 pristine_git_object: 39c858e66380044e11d3c7fd705334d130f39dea src/mistralai/client/models/completionchunk.py: id: d786b44926f4 - last_write_checksum: sha1:04b634cffa4b0eb8ca177c91d62d333a061160df - pristine_git_object: 67f447d0c6cd97cb54ffcd0c620654629ac4e848 - src/mistralai/client/models/completiondetailedjobout.py: - id: 9bc38dcfbddf - last_write_checksum: sha1:4771444753ff456829249d4e5fa5f71f2328fa78 - pristine_git_object: cd3a86ee28cdbf3a670d08f27642294321849ec0 + last_write_checksum: sha1:15f1b57b696b46bf6986c8f1a53d6bbf8d2351e2 + pristine_git_object: 5fd6c173ef29fb9bf2f570e0c2300268221e1ad3 src/mistralai/client/models/completionevent.py: id: c68817e7e190 last_write_checksum: sha1:dc43ac751e4e9d9006b548e4374a5ec44729eea4 pristine_git_object: 3b90ab0c1ecac12f90e0ae3946a6b61410247e4f - src/mistralai/client/models/completionftmodelout.py: - id: 0f5277833b3e - last_write_checksum: sha1:1c83e1d0a868eef32792844d787c5aaede0386b8 - pristine_git_object: 7ecbf54aabf022392e6d2ce2d0a354b9326eec79 - src/mistralai/client/models/completionjobout.py: - id: 712e6c524f9a - last_write_checksum: sha1:2c8500593b8f9257a0a389f87792cd174fcd7209 - pristine_git_object: 42e5f6c65809aaaa02f0bf58fbf031f4c476208b + src/mistralai/client/models/completionfinetunedmodel.py: + id: f08c10d149f5 + last_write_checksum: 
sha1:5fbd8c5475c250cbed1c2d2f47de372e8e92b128 + pristine_git_object: 54a1c1656aea1954288e9144670c939e29a83c47 + src/mistralai/client/models/completionfinetuningjob.py: + id: c242237efe9b + last_write_checksum: sha1:e4352be2411c7026c054a6fe380b87242183d4e4 + pristine_git_object: 1bf0a730c389be30bac2acfa17ffc6b5891e4918 + src/mistralai/client/models/completionfinetuningjobdetails.py: + id: e8379265af48 + last_write_checksum: sha1:b11c9bdc161da6a5cbd9f35f4bc5b51f0f3cea9c + pristine_git_object: cb7870219b261e260feceb6109088b0bbf8a6408 src/mistralai/client/models/completionresponsestreamchoice.py: id: 5969a6bc07f3 - last_write_checksum: sha1:874d3553d4010a8b83484588dcbf9136bd8c6537 - pristine_git_object: 119a9690727ae296acf72dcfafdd224a61582599 + last_write_checksum: sha1:59730cdaeeb3e95f4d38f63c34a4e491f40e6010 + pristine_git_object: a52ae892fcaafe54918160d055ee2badac31404e src/mistralai/client/models/completiontrainingparameters.py: id: be202ea0d5a6 - last_write_checksum: sha1:fd9a12417cd4f7bdc1e70ba05bbfef23b411ddd0 - pristine_git_object: 4b846b1b9bbcc4f2c13306169b715f08241e8f1c - src/mistralai/client/models/completiontrainingparametersin.py: - id: 0df22b873b5f - last_write_checksum: sha1:a92e9df1d5be2a7f2d34b1dcde131e99e5ee351d - pristine_git_object: 20b74ad9fc0c50fe7d1d3dd97fcd3c296fbf7042 + last_write_checksum: sha1:1a797019770795edcd911ff5b3580bedb83c05f4 + pristine_git_object: ca50a7ad521b46f275dd3a39c98911f13ee527c8 src/mistralai/client/models/contentchunk.py: id: c007f5ee0325 - last_write_checksum: sha1:5cedb52346bc34cb30950496d34ab87d591b6110 - pristine_git_object: eff4b8c670f47f53785690415751be05284f3d8b + last_write_checksum: sha1:b921b03b4c1e300b0e3f51ea9eadd4d7c4b7a0ea + pristine_git_object: e3de7591a089a3739af17108cecdc2d4240f10bf src/mistralai/client/models/conversationappendrequest.py: id: 81ce529e0865 - last_write_checksum: sha1:83e883e4324d76d74521607390747ecdf7dffaa0 - pristine_git_object: 0f07475e4ca640ce50a6214fe59a91041a2e596a + last_write_checksum: sha1:bdae860241893ec3ab3f22bd57c45dede2927da3 + pristine_git_object: 386714fd6dcccff8abb2247d7474949d9e8e79f8 src/mistralai/client/models/conversationappendstreamrequest.py: id: 27ada745e6ad - last_write_checksum: sha1:12c3c63b763bd16398fcbec7d6fab41729ee81a6 - pristine_git_object: a0d46f727ff99d76a1bf26891df3b0ed80a88375 + last_write_checksum: sha1:0a563cb146c4806ee6a133d10e7af8839e6f38dd + pristine_git_object: 32f6b148c647d3bac8edada3b941c51c17d78901 src/mistralai/client/models/conversationevents.py: id: 8c8b08d853f6 - last_write_checksum: sha1:6362a88ae26cb67f7abc3d2b0963f9a869c15371 - pristine_git_object: f24760381501f822593ef5903df0d32ca3cf9b47 + last_write_checksum: sha1:2eedde1ecf31061fb13de0b1bdc9ea311897b570 + pristine_git_object: 17812983f3aee3e675d44f46ca1b741315c2139a src/mistralai/client/models/conversationhistory.py: id: 60a51ff1682b - last_write_checksum: sha1:6fa8bdd370239df879da7b687c037405a8fbbe25 - pristine_git_object: 92d6cbf90c9c76945ee79752d5b4232aea10a79d + last_write_checksum: sha1:8984a0b12766e350022796a44baf6aac4c93f79b + pristine_git_object: ceef115b70ff02da05ac97571a177edf5b5f6cf6 src/mistralai/client/models/conversationinputs.py: id: 711b769f2c40 last_write_checksum: sha1:5fc688af61d6a49ede9c9709069f3db79f4dc615 pristine_git_object: 7ce3ffc3772926a259d714b13bfc4ee4e518f8f7 src/mistralai/client/models/conversationmessages.py: id: 011c39501c26 - last_write_checksum: sha1:408e26cb45dc1bdf88b1864d365e636307920df3 - pristine_git_object: 1aa294a497d2eb27a12dcbcce36c7956f6ee4f4e + last_write_checksum: 
sha1:95e3abe55199f2118e6fb7e5d8520af6a929449a + pristine_git_object: 84664b62337dcdc408bb01e0494fa598e6a86832 src/mistralai/client/models/conversationrequest.py: id: 58e3ae67f149 - last_write_checksum: sha1:f1b0b2b6a9c9b94ed5e3a77fb0b92e695f421a2e - pristine_git_object: 2005be82d8ebcf8c8fa74074abf25f072e795582 + last_write_checksum: sha1:f7a67082e06c1789f4c6a4c56bfef5f21cce5034 + pristine_git_object: 83d599ebf984f1df2390d97dbe651881f7dee0e2 src/mistralai/client/models/conversationresponse.py: id: ad7a8472c7bf - last_write_checksum: sha1:8b625fe8808f239d6bc16ecf90ae1b7f42262c0c - pristine_git_object: 24598ef3fc24a61a0f15ab012aa211ba57cd0dcf + last_write_checksum: sha1:99148d75abcb18c91ba0a801174461346508f5fb + pristine_git_object: f6c10969a931eaf1a4667b0fcff3765f57658b15 src/mistralai/client/models/conversationrestartrequest.py: id: 681d90d50514 - last_write_checksum: sha1:0ce81536464db32422165c35252770f3197fb38e - pristine_git_object: 35d3099361274440552e14934b6a1b19ebc8f195 + last_write_checksum: sha1:99123cee7c54f44c02b56111305af399143b4e5a + pristine_git_object: 7ae16aff4de36a91093d3021b66283e657b00897 src/mistralai/client/models/conversationrestartstreamrequest.py: id: 521c2b5bfb2b - last_write_checksum: sha1:b996f57271f0c521113913f48b31d54c17d73769 - pristine_git_object: 0ddfb130d662d954c3daabdf063172b8ea18a153 + last_write_checksum: sha1:abfd14652b4785c36de84a59593b55f7a6a2d613 + pristine_git_object: 0e247261d997ac3d8ff0155ba54cc4cafe9ac65a src/mistralai/client/models/conversationstreamrequest.py: id: 58d633507527 - last_write_checksum: sha1:fc4f2f1578fbeb959ddbe681dee2d11f0a4e6c5e - pristine_git_object: 379a8f2859b5f40cc744ad8f9bc6c39a198258b5 + last_write_checksum: sha1:7dc25a12979f4082ed7d7e37584bb9c30297f196 + pristine_git_object: a20dccae1a60753ed95f59da0df78c204c19d515 + src/mistralai/client/models/conversationthinkchunk.py: + id: 77e59cde5c0f + last_write_checksum: sha1:5db067661a5d4b0c13db92ad93da1aab9e0e7a34 + pristine_git_object: e0e172e3edbe46c000e82e712c135b96a65312e9 src/mistralai/client/models/conversationusageinfo.py: id: 6685e3b50b50 - last_write_checksum: sha1:60f91812b9b574b3fade418cc7c2191253f6abbf - pristine_git_object: 98db0f1617bd7484750652997dcd43d08ef7c5fc - src/mistralai/client/models/createfinetuningjobop.py: - id: fd3c305df250 - last_write_checksum: sha1:e29ada8f733de44bfeab2885d2221ade84b34619 - pristine_git_object: f55deef5d9f6134fddb02c458a0d812759cea358 - src/mistralai/client/models/createorupdateagentaliasop.py: - id: a79cf28bda01 - last_write_checksum: sha1:d4f2790b5970c9cf30b3fcee9d8bc6d4b8c33778 - pristine_git_object: cde1dd054c447a8617527585e783a95affba3277 - src/mistralai/client/models/deleteagentaliasop.py: - id: e4d0d7f75b24 - last_write_checksum: sha1:66e34ba7fb1a238d55c7ed380bd666c8975c01b4 - pristine_git_object: c52d099e9c1f28bf37ee009833b5fb8e351ed987 - src/mistralai/client/models/deleteagentop.py: - id: 089fb7f87aea - last_write_checksum: sha1:a196bcc758e36ffeb17fab25bb60451d3d66a4d8 - pristine_git_object: 8b14bca7bf5d67e16181b67ef6b7375c1b0a93fd - src/mistralai/client/models/deleteconversationop.py: - id: 86fefc353db0 - last_write_checksum: sha1:48f33b614ec087fdaf2b29d9c3eefd6e8d7d311f - pristine_git_object: 39607f40640c6dfa3ef20d913a90abee602b9b4a - src/mistralai/client/models/deletedocumentop.py: - id: 62522db1ccf2 - last_write_checksum: sha1:1a4e2e72a0d3cd24e184ce3cc5037f5ec7cdd9a5 - pristine_git_object: 400070a49bc046d8132bfc7dfe3e114faa719001 - src/mistralai/client/models/deletefileop.py: - id: 286b4e583638 - last_write_checksum: 
sha1:2561c1fe03ec3915dfa48fa354a86a56ba9b54c4 - pristine_git_object: 4feb7812f8acfa366e4b46fc914925df4f705528 - src/mistralai/client/models/deletefileout.py: - id: 5578701e7327 - last_write_checksum: sha1:a34520be2271c1e37fa8b3c1bdead843db7b1bb9 - pristine_git_object: c721f32cfe752c2c084efb72db3e5409795e387a - src/mistralai/client/models/deletelibraryaccessop.py: - id: df80945bcf19 - last_write_checksum: sha1:065aad372e0bbfd998fe3adc3389e3dbc9d5b674 - pristine_git_object: ca14c3ffc43be3aee14d6aa1f4805f0483d8b676 - src/mistralai/client/models/deletelibraryop.py: - id: cd0ce9bf8d51 - last_write_checksum: sha1:07840cbdb741bba291f1db1a1b54daca99e8f7ea - pristine_git_object: 5eb6fc310aa62454e3f7ed0766212c807125fe8c - src/mistralai/client/models/deletemodelop.py: - id: 2c494d99a44d - last_write_checksum: sha1:97dce35d527e03612068896572824cc0f13269c1 - pristine_git_object: 55c4b2422336ef6e148eedbd4a6a60846d187e9b + last_write_checksum: sha1:3e0489836936a7a77fa3b41adde1eb459ecd176d + pristine_git_object: 1e80f89ee4f7a3d464df2bf39990b467029e86c1 + src/mistralai/client/models/createagentrequest.py: + id: 442629bd914b + last_write_checksum: sha1:273dde9338cc1eb166ee40f4c6215f90cae908ab + pristine_git_object: 54b09880eefe348d2e003ed1b238b67cb58b8e34 + src/mistralai/client/models/createbatchjobrequest.py: + id: 56e24cd24e98 + last_write_checksum: sha1:e648017622cd6e860cb15e5dd2b29bf9f2a00572 + pristine_git_object: 9a901fefee0ea6a825274af6fd0aa5775a61c521 + src/mistralai/client/models/createfileresponse.py: + id: fea5e4832dcc + last_write_checksum: sha1:b7f3ba95a09a3225eae80b53152fe2b7d3806fbe + pristine_git_object: 768212803bc3535ac8a27a9c0d48f147e3d536b7 + src/mistralai/client/models/createfinetuningjobrequest.py: + id: c60d2a45d66b + last_write_checksum: sha1:2e8e608140860bba9ecfa9498d61cf807f96680a + pristine_git_object: e328d944ce2a71ffbec027965d31075070647dbc + src/mistralai/client/models/createlibraryrequest.py: + id: 1c489bec2f53 + last_write_checksum: sha1:45fa65be82712ce99304027c88f953f0932bdae4 + pristine_git_object: 58874e014275b06ce19d145aaa34a48d11ca0950 + src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py: + id: 767aba526e43 + last_write_checksum: sha1:73568f2f450bf9c23aca3649372a92e1b9a2fc54 + pristine_git_object: 199614f53501f34088cb112d6fe1114e1e588d8a + src/mistralai/client/models/deletefileresponse.py: + id: 3ee464763a32 + last_write_checksum: sha1:2c0df66fc8c4384d50e54ac03577da3da2997cf5 + pristine_git_object: ffd0e0d015e38e5f6113da036ebeba98441444f4 src/mistralai/client/models/deletemodelout.py: id: ef6a1671c739 - last_write_checksum: sha1:4606683ef6da0aae7e88bc50144eddc83908f9d7 - pristine_git_object: bf22ed177ee91dce98bfd9b04f02e683c79e4860 + last_write_checksum: sha1:d67ac7c3fa143be40c74455c7206c94bfb5a2134 + pristine_git_object: fa0c20a419c59b8fc168c150b28d703398ea7f40 src/mistralai/client/models/deltamessage.py: id: 68f53d67a140 - last_write_checksum: sha1:ff7fa85086bd56863f7f4a255b008cfaa11a959c - pristine_git_object: fbb8231a310e90afd50951dd0f572ce3e0f029e6 + last_write_checksum: sha1:b18350de03a8685bea5ac52e1441415b5e58bdf4 + pristine_git_object: d9fa230e93d4e0886f21c836cf3813855eb8f9fd + src/mistralai/client/models/document.py: + id: fbbf7428328c + last_write_checksum: sha1:2a5a28c54f0aec50059b6badc1001b1cd120e7d3 + pristine_git_object: 31eebbd1a7d7fdcb498259837c533bfc8008a6f9 src/mistralai/client/models/documentlibrarytool.py: id: 3eb3c218f457 - last_write_checksum: sha1:e5bfb61a4a03a3b28837c27195f1bcd8cc14c6b2 - pristine_git_object: 
ff0f739391404604c1cc592c23507946aa0b693f - src/mistralai/client/models/documentout.py: - id: 7a85b9dca506 - last_write_checksum: sha1:f041a4866c67d1f81f62282918d625216a760355 - pristine_git_object: 3b1a5713c84512947a07d153792b17fcf3262dcb + last_write_checksum: sha1:d03a6136192b56778bd739d834d9bdc80a09cc23 + pristine_git_object: 642c3202b11c5bb8a2b41cf8ae0fe43f73aa2a81 src/mistralai/client/models/documenttextcontent.py: id: e730005e44cb last_write_checksum: sha1:c86f4b15e8fda1cd5c173da01462342cd22b7286 pristine_git_object: b6904cb4267347b62a457a01b91a391500326da9 - src/mistralai/client/models/documentupdatein.py: - id: d19c1b26a875 - last_write_checksum: sha1:bddd412de340d050cfbdd4206a9fbb3d1660a045 - pristine_git_object: 669554de5d33f6163c8d08fefee52c1869662eba src/mistralai/client/models/documenturlchunk.py: id: 4309807f6048 - last_write_checksum: sha1:186a684da48bb5d237769ecb3dbf1479a5c5ee55 - pristine_git_object: 304cde2b687e71b0d2fb0aee9b20826473375b25 - src/mistralai/client/models/downloadfileop.py: - id: 4d051f08057d - last_write_checksum: sha1:b80c5332cfdb043bb56f686e4e1c4bf26495b04b - pristine_git_object: fcdc01d644bdce8d1fc7896b5f8244a7a5311dfa + last_write_checksum: sha1:33cdaccb3a4f231730c7fa1db9f338a71e6311b2 + pristine_git_object: 43444d98b8b7fb430f9c33562c35072d9c79a263 src/mistralai/client/models/embeddingdtype.py: id: 77f9526a78df last_write_checksum: sha1:a4e2ce6d00e6d1db287a5d9f4254b0947227f337 pristine_git_object: 732c4ebe3678563ebcdbafd519f93317261586fb src/mistralai/client/models/embeddingrequest.py: id: eadbe3f9040c - last_write_checksum: sha1:6071612944c4c603803cc7f2adc1e9784549c70f - pristine_git_object: f4537ffa9bdc0a9a73101e1b1524fed1a09c1a65 + last_write_checksum: sha1:e36282eb015b782804b4bdf3d18b596607b020fd + pristine_git_object: 15950590fec8b82a4fb28d69009a6f6cfb83c9ee src/mistralai/client/models/embeddingresponse.py: id: f7d790e84b65 last_write_checksum: sha1:9bb53a5a860c8e10d4d504648d84da73068c0a83 pristine_git_object: 6ffd68941f32f396998df9dded14ff8365926608 src/mistralai/client/models/embeddingresponsedata.py: id: 6d6ead6f3803 - last_write_checksum: sha1:3e2430e6bd9b3c77a564f4e56edec1274446a1f4 - pristine_git_object: a689b290d5a4b360e409413c96bb5e7288ce2e2e + last_write_checksum: sha1:ba5f38ee6e2b0436532229da01ba79ee49c20d12 + pristine_git_object: 098cfae06eae6a92830b4b5a26985f5d5950e512 src/mistralai/client/models/encodingformat.py: id: b51ec296cc92 last_write_checksum: sha1:ea907f86b00323d99df37f7ff45d582aace798e7 @@ -2156,262 +2112,298 @@ trackedFiles: id: 62d6a6a13288 last_write_checksum: sha1:015e2db9e8e5a3e4ce58442ccedaf86c66239dde pristine_git_object: 56d82cbed237f32a8b00cfee4042dfe3e7053bcb - src/mistralai/client/models/eventout.py: - id: da8ad645a9cb - last_write_checksum: sha1:67f7cc29102a971d33b6cbbcb06ffcfe595227a5 - pristine_git_object: a0247555bb816061cb22f882406c11c3a9011818 + src/mistralai/client/models/event.py: + id: e5a68ac2dd57 + last_write_checksum: sha1:8ed848fe2e74c7f18ee8f4dcba39ad1c951c16d2 + pristine_git_object: c40ae2b1a1b8131a90c637e3268872b97b22683e src/mistralai/client/models/file.py: id: f972c39edfcf - last_write_checksum: sha1:8d0adce8f4dfc676f6da6465547a0d187d4326f1 - pristine_git_object: dbbc00b50e5578230daefa47648954ead8ed8eb9 + last_write_checksum: sha1:609381a40a4bfdda2e7e750a848cd2bb38d6ac0f + pristine_git_object: 1b0ea1d4a288d9723dcdd7cfda99d49c5cbd9e7c src/mistralai/client/models/filechunk.py: id: ff3c2d33ab1e - last_write_checksum: sha1:9f970ef8366df8087f9332a4b1986540063a1949 - pristine_git_object: 
43ef22f861e0a275c7348133d0c4d04551477646 + last_write_checksum: sha1:d7561c39252b81007a8e079edb4f23989ffd510e + pristine_git_object: 5c8d2646dc0d5c732828bdd81c5a58e12fa92a42 src/mistralai/client/models/filepurpose.py: id: a11e7f9f2d45 last_write_checksum: sha1:8b167c02f9f33e32d5fd1c6de894693924f4d940 pristine_git_object: 49a5568ff82ad4a85e15c8de911e8d6c98dcd396 + src/mistralai/client/models/files_api_routes_delete_fileop.py: + id: 2f385cc6138f + last_write_checksum: sha1:ccfd3ff64635cfd511f49c5e02a6f1860c479966 + pristine_git_object: eaba274b9dd94d6cf729325316b3e3e9b3834566 + src/mistralai/client/models/files_api_routes_download_fileop.py: + id: 8184ee3577c3 + last_write_checksum: sha1:81058ede2a5eb333b54561f99ed7878082c0f411 + pristine_git_object: 83de8e73a3d50917e4a41bb92a828a10e646a632 + src/mistralai/client/models/files_api_routes_get_signed_urlop.py: + id: 0a1a18c6431e + last_write_checksum: sha1:ef4908b9d2e43c0256d25a5aa533c5bdc1205113 + pristine_git_object: 64cd6ac57b4f2de70403e11062307a8d8d5d94e7 + src/mistralai/client/models/files_api_routes_list_filesop.py: + id: b2e92f2a29b4 + last_write_checksum: sha1:71e67fc63f0df28c534d4bd03a6464ae88959dc2 + pristine_git_object: b03e2f886ce02d4beabca150302a924ae63ad507 + src/mistralai/client/models/files_api_routes_retrieve_fileop.py: + id: 5d5dbb8d5f7a + last_write_checksum: sha1:d451d8d2b32f412158a074919cca1a72f79940cb + pristine_git_object: 5f8de05f1bba07517dc2ee33a4f05122503b54b5 + src/mistralai/client/models/files_api_routes_upload_fileop.py: + id: f13b84de6fa7 + last_write_checksum: sha1:d38a86b9e7d338278e14c68756654d85bc330070 + pristine_git_object: 54ff4e4951a58e13993be0f5d2c16b0cb11c0978 src/mistralai/client/models/fileschema.py: id: 19cde41ca32a - last_write_checksum: sha1:245115d1f955324bce2eeb3220bdaa6906b28e92 - pristine_git_object: cbe9b0d17ad15ce02e9fd973fe49666885c6ff92 - src/mistralai/client/models/filesignedurl.py: - id: a1754c725163 - last_write_checksum: sha1:5d981b1743aa2d84818597b41a5f357b4256e9e0 - pristine_git_object: 53dff812ffe5c5859794424d49f8bd7f735cf3b0 + last_write_checksum: sha1:0b3acb889a2c70998da4076e2f4eef3698e8b117 + pristine_git_object: e99066a9eb19daebcf29f356225635a297c444e1 src/mistralai/client/models/fimcompletionrequest.py: id: cf3558adc3ab - last_write_checksum: sha1:db51cde0b13bb373097f2c158b665ccb3c5789f4 - pristine_git_object: e2f6032784c996d18c100b8b2cde4bb4432af884 + last_write_checksum: sha1:20bca1f6a0ab6e84f48b6e332f0c3242da84ae45 + pristine_git_object: ea877213d1abe4811fee188eb7a60ccf1bb51f18 src/mistralai/client/models/fimcompletionresponse.py: id: b860d2ba771e last_write_checksum: sha1:dffd5a7005999340f57eaa94e17b2c82ddc7fd90 pristine_git_object: 1345a116b7855ab4b824cf0369c0a5281e44ea97 src/mistralai/client/models/fimcompletionstreamrequest.py: id: 1d1ee09f1913 - last_write_checksum: sha1:df973050b942b844280bf98f0a3abc90bd144bbb - pristine_git_object: 480ed17ab006e7afa321a91c5ccebd6380f8f60c + last_write_checksum: sha1:aa8313ecdd852034aaf6ec23dc3f04f7ef8e28e5 + pristine_git_object: e80efc095feb2e2df87f6d3c3f9c56b6cbf347b3 src/mistralai/client/models/finetuneablemodeltype.py: id: 05e097395df3 last_write_checksum: sha1:daf4cd1869da582981023dea1074268da071e16a pristine_git_object: 7b924bd7abc596f0607a513eee30e98cbf7ab57a + src/mistralai/client/models/finetunedmodelcapabilities.py: + id: 475c805eab95 + last_write_checksum: sha1:5919e48a6778f1a2360ce090d05b41b1bf33253f + pristine_git_object: 2f4cca0b8c0e3e379f5c2aa67953f2e55757f68d src/mistralai/client/models/ftclassifierlossfunction.py: id: 
d21e2a36ab1f last_write_checksum: sha1:ca90e2f1cd0b9054293bea304be0867c93f7fac2 pristine_git_object: ccb0f21b5a69f91119bec9db6e9f3d876e4c35af - src/mistralai/client/models/ftmodelcapabilitiesout.py: - id: f70517be97d4 - last_write_checksum: sha1:2bc7700ad89b7aab37fa02fcb6d9282bc252315e - pristine_git_object: 42269b785d9d5ad2257179f2c093c62637fb5dd6 src/mistralai/client/models/ftmodelcard.py: id: c4f15eed2ca2 - last_write_checksum: sha1:7441e4155beaa97cea47b6295017f567dd6eee1a - pristine_git_object: 570e95e2276b144e008e9ccf6a108faa1fc835f5 + last_write_checksum: sha1:b1b36ff994bcadd8c917880333627fd05976c991 + pristine_git_object: 2c26ff2f66faa55dc5a5a1743720e8f3f5d4d0f1 src/mistralai/client/models/function.py: id: 32275a9d8fee - last_write_checksum: sha1:356a2c6c9d2437e60036a9b3d1a3d154302363c8 - pristine_git_object: 3632c1afb40aebab0795f754814036e04c251469 + last_write_checksum: sha1:ca24a512de22787932d7f4af005699621926d6c0 + pristine_git_object: 1da1dcc9b637d0a5b0fbb7cf2761f6d01eb3068f src/mistralai/client/models/functioncall.py: id: 393fca552632 last_write_checksum: sha1:6e96e9abaa9b7625a9a30e376c31b596ee9defcb pristine_git_object: 527c3ad408e1e1ccfe6301a8860e7f751e1d312d src/mistralai/client/models/functioncallentry.py: id: cd058446c0aa - last_write_checksum: sha1:6ece3816c50bd04b908743ad62e2dc71d815842a - pristine_git_object: 6ada1d358641a23bc83b93f222eeff659a124b34 + last_write_checksum: sha1:776f397d17f946bae2929998f14d991a1ccc99e0 + pristine_git_object: d05fad856729a76dd24f8aa4d050f8381e51ed6a src/mistralai/client/models/functioncallentryarguments.py: id: 3df3767a7b93 last_write_checksum: sha1:9858feba8f7f01017f10477a77dec851a1d06e55 pristine_git_object: afe81b24e131a8ef879ee7f140271aa762b8ed2f src/mistralai/client/models/functioncallevent.py: id: 23b120b8f122 - last_write_checksum: sha1:cb63fb3cfb4debfca7b207b49e592566619f84b1 - pristine_git_object: 5d871a0e0f15cc27afe3c861f387609aa9a8a17f + last_write_checksum: sha1:62b5b94df4e5b6f945ead78871cdbfceb6cd40cf + pristine_git_object: 849eed76d08524e5e4d1e7cc1c3fa04386f5ef75 src/mistralai/client/models/functionname.py: id: 000acafdb0c0 last_write_checksum: sha1:4145b7b817b712b85dcbedb309416c7ba72d827e pristine_git_object: 07d98a0e65ccbcba330fb39c7f23e26d3ffc833c src/mistralai/client/models/functionresultentry.py: id: 213df39bd5e6 - last_write_checksum: sha1:04a8fd7396777c412fa9c73c0bef148b2ab53cb2 - pristine_git_object: ca73cbb7481fe0e97b354e9abe5ef6034f10bd98 + last_write_checksum: sha1:3aa6834bf2beda061ac772a0a8a4d7ed5ad942a0 + pristine_git_object: 01e2e36fc0a9de6a2b06a4205004992baf0f9e43 src/mistralai/client/models/functiontool.py: id: 2e9ef5800117 - last_write_checksum: sha1:5c4ea61a1bccd87e1aae06bfa728c29a4ec60c54 - pristine_git_object: 13b0449687f64848cb2f2fdf792f148f9e3cfed9 - src/mistralai/client/models/getagentop.py: - id: 5a28bb1e727e - last_write_checksum: sha1:50a681253a1075f1268a269cd67154efa35dff6a - pristine_git_object: 55d8fe6860fa4c868c4d6d5d5d2ce4571e9071b4 - src/mistralai/client/models/getagentversionop.py: - id: a0db5a6aab1f - last_write_checksum: sha1:d1dfc0927abcae22460838902d1f5ddc2a224856 - pristine_git_object: 77b8a2662939e03b261f713aa7d9676746a4df1e - src/mistralai/client/models/getbatchjobop.py: - id: 443103fe3b88 - last_write_checksum: sha1:3a7f9656f3d169c60f0d3f16b00c4136d193468e - pristine_git_object: 792c3e2121902734094a7224c8605109fc697f44 - src/mistralai/client/models/getconversationhistoryop.py: - id: c863a4cbeb34 - last_write_checksum: sha1:4e04b4550c7b48635eca1943bcfee64027f0e7ca - 
pristine_git_object: c1fbf3de4ee966fffa2400a9c109d952b26543da - src/mistralai/client/models/getconversationmessagesop.py: - id: bb8a90ba7c22 - last_write_checksum: sha1:1b7aad5c74338aeecb11de44d8378aaa75498e37 - pristine_git_object: 6666198edce05a99c55f1c35f26f6d3b548c9b0d - src/mistralai/client/models/getconversationop.py: - id: 1a622b8337ac - last_write_checksum: sha1:4665e81fae4f12fabc09629f32d28c1c2de2bcf2 - pristine_git_object: d204d1755b4dc23ba8397ad24fec30bd064eacce - src/mistralai/client/models/getdocumentextractedtextsignedurlop.py: - id: 69099395d631 - last_write_checksum: sha1:f6d5e8499a314e903301e419fb206c33644363ff - pristine_git_object: 9a71181d3abd625643e741c562fe73f25bf12932 - src/mistralai/client/models/getdocumentop.py: - id: de89ff93d373 - last_write_checksum: sha1:4d1f358dfe3b44ccd2a88aea6730fbaf4b5f1d93 - pristine_git_object: d7b07db791a3adb3992475f0cf49c3fe01007ad9 - src/mistralai/client/models/getdocumentsignedurlop.py: - id: b8d95511c6d1 - last_write_checksum: sha1:255a0b505d558db3149652822718c7bcecc706e8 - pristine_git_object: e5d56c54c1ffc3529a8d1cf013bcb3327392b269 - src/mistralai/client/models/getdocumentstatusop.py: - id: f1f40b8f003f - last_write_checksum: sha1:c442daff8adb3db0ac58b03e54b7c05c82b202a9 - pristine_git_object: 4206f593ca58650f9df17b377b67c374a1b0d883 - src/mistralai/client/models/getdocumenttextcontentop.py: - id: ba23717093ef - last_write_checksum: sha1:33f047af38e4be2b71f4d90a36614ea7ab096a28 - pristine_git_object: 8a7b4aae025bbcb5ade5d4d36f2bb5e34cbb315e - src/mistralai/client/models/getfilesignedurlop.py: - id: 1aa50b81c8cf - last_write_checksum: sha1:a8fb95f119d173dd1d7afed02597a297dbbc7a89 - pristine_git_object: 06ed79eea058d4ebffc5d0b87ae2d06a32f4755a - src/mistralai/client/models/getfinetuningjobop.py: - id: afe997f96d69 - last_write_checksum: sha1:25db6d0d336a78189b603bbce16b0e0de84a33f1 - pristine_git_object: 1fb732f48a1a4c2993185a6a272879a83c80dc06 - src/mistralai/client/models/getlibraryop.py: - id: c84a92e23a90 - last_write_checksum: sha1:d51c0cf40a6ed398b0cb7078fe897d047b55e251 - pristine_git_object: bc0b4a238b146c6e5853e0b9d3031a876f30bc17 + last_write_checksum: sha1:bce744d77a3dac92d4776a37be497311674bdc7d + pristine_git_object: eae872643c85115a825c2feda11d9a6c12a06b99 + src/mistralai/client/models/getfileresponse.py: + id: 81919086e371 + last_write_checksum: sha1:fc0232e54c0de355058c5bd82e424953b1659b56 + pristine_git_object: f625c153799dcd38e4990504d48371112b65cd15 + src/mistralai/client/models/getsignedurlresponse.py: + id: cee4e4197372 + last_write_checksum: sha1:ab9adbc06e7f02e791dc549ad1850ce1b1a250a7 + pristine_git_object: 4ba95894f2b89719fa58e7e397c28014dbd00316 + src/mistralai/client/models/githubrepository.py: + id: 4bc83ce18378 + last_write_checksum: sha1:21aa04bc426158ccbe1ded3bc65b46e6869e897d + pristine_git_object: 84b01078c2192de5d6668a6943d416a2ff30db5f src/mistralai/client/models/githubrepositoryin.py: id: eef26fbd2876 - last_write_checksum: sha1:cc98805951c3f80d9b8f0ba4037cf451551b0742 - pristine_git_object: e55389c380416f69ed7dc085cbbaaba056c4d1ba - src/mistralai/client/models/githubrepositoryout.py: - id: d2434a167623 - last_write_checksum: sha1:76d98ac7613e626599cb4c7a0b0366e9b20815ff - pristine_git_object: 514df01c217b40d8c050839ac40b938c68ef1bf6 - src/mistralai/client/models/httpvalidationerror.py: - id: 4099f568a6f8 - last_write_checksum: sha1:be2db0d4ec07da0ddb37878761545c3dde8fb8ec - pristine_git_object: e7f0a35bf208c32086c7b448273d1133d0f1027b + last_write_checksum: 
sha1:18bd07155fff4b99d114353fee95e6bd828aeacd + pristine_git_object: 38bcc2087630f2fd4e9e5fa149449c32e21fdb07 + src/mistralai/client/models/imagedetail.py: + id: c1084b549abb + last_write_checksum: sha1:375db5c8fa87712dc37e46d0bf72283ae6cd6400 + pristine_git_object: 1982d357277a92fc7ebea3b99146116596d99c78 src/mistralai/client/models/imagegenerationtool.py: id: e1532275faa0 - last_write_checksum: sha1:85122792c3ba324086096345119fedf326f55c86 - pristine_git_object: 680c6ce2d08277e65e23ea3060e83c1fa4accb78 + last_write_checksum: sha1:88a1347876f69960dc33f8e2cb9929ab1a90a224 + pristine_git_object: c1789b18028156ae683d0323e65e47a43694570f src/mistralai/client/models/imageurl.py: id: e4bbf5881fbf - last_write_checksum: sha1:9af5cff0b3a2c1c63e2bd1f998dcfeab273fd206 - pristine_git_object: 4ff13b1ccbc157f21013aacd7a062e89a26dcbf9 + last_write_checksum: sha1:28ef2509fdb489ecf379b60e883e6957aebd2797 + pristine_git_object: ac1030f5d61144e393b2aa9f3ffea893faabb1f7 src/mistralai/client/models/imageurlchunk.py: id: 746fde62f637 - last_write_checksum: sha1:57e48972720a3e317291250d6d94c44d295b69f5 - pristine_git_object: 993185cce833c59ad341b977cf9263654951fa03 + last_write_checksum: sha1:0ac388d25cae5348ffb3821706c3a8b64e716ff5 + pristine_git_object: 7134b46e7428cee52eda859cb78387c99f7e1f5a src/mistralai/client/models/inputentries.py: id: 44727997dacb - last_write_checksum: sha1:44ef8e75dd43b82276a0f06ef5c6be9eed46b379 - pristine_git_object: dc9892956f0e2583c51bf49ef89adbd22b8646d5 + last_write_checksum: sha1:9e2a776be59c5043ea4179a60ac082faf064cc3d + pristine_git_object: e2da5a80aea121d18e2232f302ad73f63b4fc050 src/mistralai/client/models/inputs.py: id: 84a8007518c7 - last_write_checksum: sha1:871491fa3b24315bc1bddf371334381f75ab035d - pristine_git_object: cfcdeb3d5895ccb34512c2a0a2e799e763e09c09 + last_write_checksum: sha1:d067587b5395529fbd638741f20b80edb2848e39 + pristine_git_object: 9ecd7f484ea306b91a9ebf038a0addd80ccd57c4 src/mistralai/client/models/instructrequest.py: id: 6d3ad9f896c7 last_write_checksum: sha1:b56a77442b50b50151adedaa5ec356dc96c56428 pristine_git_object: e5f9cccf174d8e73c42e8ee4aa294b43e1ad6cf5 - src/mistralai/client/models/jobin.py: - id: f4d176123ccc - last_write_checksum: sha1:478a9beaf1c5ada536f5c333a47aa2ac0900bd16 - pristine_git_object: b3cb8998b5b0ce00414e40643eb3e259b2c0aabf - src/mistralai/client/models/jobmetadataout.py: - id: 805f41e3292a - last_write_checksum: sha1:1333181d5a3dff43076095f61e1d57f37085abbe - pristine_git_object: 1d386539d8c638d96b8f468cfca3241dfc07a9f3 - src/mistralai/client/models/jobsout.py: - id: 22e91e9631a9 - last_write_checksum: sha1:e9434f43df7df8e991eb0387eabcf308cae3cb65 - pristine_git_object: a4127a5d835c0f0ead04980f05cb293e18970905 + src/mistralai/client/models/jobmetadata.py: + id: cfbdde7fc0a2 + last_write_checksum: sha1:e1b180a47ca888d0fd4cbc34b62000d3ac86c2b5 + pristine_git_object: f6e96fa104e7a6c8ce9a94538a3d00167a2ae341 + src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py: + id: b56cb6c17c95 + last_write_checksum: sha1:21b5794f110c53691654d7195201f9a4b7793f21 + pristine_git_object: de2e63472ac53809cfeae200bd7d2f3dcbb70034 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py: + id: 36b5a6b3ceee + last_write_checksum: sha1:b41862f037d74bbdc44fb4df5f65cd402a16703b + pristine_git_object: d779e1d96c359b0d548d5dee17c06ae2a505cf47 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py: + id: d8f0af99c94d + last_write_checksum: sha1:a50885f97cfd4d38bc3e3b0746c88bd602b88f94 + 
pristine_git_object: 89ac3c933347497b6fb1ec26fecb485802ef85fc + src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: + id: 34f89d2af0ec + last_write_checksum: sha1:3d5242f757ee9be10963af9cd5d47824fc83c71a + pristine_git_object: 9fa99837dda7e9413d3a05822cd17107c5fae51d + src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: + id: d175c6e32ecb + last_write_checksum: sha1:515b7737cf8262243ee6175e297714125f3962bc + pristine_git_object: 56fa534044522f27fb26ef4820d10f22752134ea + src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: + id: 81651291187a + last_write_checksum: sha1:19a0707e2f73b0184959d7c710a170650fa1767a + pristine_git_object: db857f7d6cc77057491e4b968798f730228b09bc + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: + id: d910fd8fe2d6 + last_write_checksum: sha1:52704f01d7388a8b62d59b6f7cd94fcb7d067ebf + pristine_git_object: ddd9c1891356a7c272e0244a9aea3d3d6b2d00d6 + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: + id: cf43028824bf + last_write_checksum: sha1:36082bde6f3d932c66178729533e2a69040fdeab + pristine_git_object: ec80a158f45061b122f84ebaff89ae82ef8d98ef + src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: + id: e7ff4a4a4edb + last_write_checksum: sha1:8cbfc309c09df806ad7d130004b4e1c2b89ede0a + pristine_git_object: cd25fa04f29dd544f01f3620b31d1c54c86addbb + src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: + id: 7cc1c80335a9 + last_write_checksum: sha1:f66c16423155066b844f8e89446d2acbb6e68157 + pristine_git_object: fd01fe6948613b0fffef9ac76cf1a0f9011ec5af + src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: + id: 6d9dc624aafd + last_write_checksum: sha1:fbacb171b9c75f1fe45406f542a958d10c15fae2 + pristine_git_object: 296070b426900305fe4596f03a3c9f081cdb2dcf src/mistralai/client/models/jsonschema.py: id: e1fc1d8a434a - last_write_checksum: sha1:6711508e9c1bd20fc8b1bfdbd1181ca29144ef0d - pristine_git_object: 948c94ed8fe8102a9cdced68fde6be03489f5778 - src/mistralai/client/models/legacyjobmetadataout.py: - id: 4f44aa38c864 - last_write_checksum: sha1:e93d512c8cb6e0812248a195ff869428209cd71f - pristine_git_object: 4453c15798f4fd4db2de64e0beaf7ad557d82fa1 - src/mistralai/client/models/libraryin.py: - id: 6147d5df71d9 - last_write_checksum: sha1:34c5c9582a488fe87da084e74316e0fd76aa28d1 - pristine_git_object: 1a71d410d997a6d3f197947f821117e0605517af - src/mistralai/client/models/libraryinupdate.py: - id: 300a6bb02e6e - last_write_checksum: sha1:c9b1a0a00d31fa839df12353f1a3ee9d0b3ffb60 - pristine_git_object: 328b2de3cd4e304fd462882eca7226e460b7c4a7 - src/mistralai/client/models/libraryout.py: - id: 4e608c7aafc4 - last_write_checksum: sha1:9841adb596398554dfcaeb35b7e5a0572c541cff - pristine_git_object: c7ab7b8d39b68b5998c4874f9942caa275cf65d9 - src/mistralai/client/models/listagentaliasesop.py: - id: ff038766a902 - last_write_checksum: sha1:eef4e471999d5df5195aea51cde027b55567aeef - pristine_git_object: 83c6d1769c10fe38402a36b6aff2a18da61f4504 - src/mistralai/client/models/listagentsop.py: - id: a573a873c404 - last_write_checksum: sha1:db3c9e6ddc146138ed971f9970d9a164c0f97456 - pristine_git_object: 863fc13af1429bd1a6c02a9a20d2b6cb0cad7b34 - src/mistralai/client/models/listagentversionsop.py: - id: ccc5fb48e78f - last_write_checksum: sha1:0f2306bcceba2a2d7bfeb0be33126514d9287f17 - pristine_git_object: 
613d3d8516690e6cba15922dfe69bdf62c039b01 - src/mistralai/client/models/listbatchjobsop.py: - id: f49af453f5e6 - last_write_checksum: sha1:e48b0e7371ee8f637e4fd6bed140cdbb1d405a7d - pristine_git_object: 5322df816e391a5569afcfd14edaeb128467a176 - src/mistralai/client/models/listconversationsop.py: - id: d6007f6c1643 - last_write_checksum: sha1:ece12b550abe6e17eb79f7a05593a93ea055f3f6 - pristine_git_object: 1c9a347c0ad4801c3a1b941e6328061d23d7dcd5 - src/mistralai/client/models/listdocumentout.py: - id: b2c96075ce00 - last_write_checksum: sha1:fc3eca772d1e32938ea1bd2f3e98cdea5f1003f3 - pristine_git_object: a636b3deff66fe4277a63c04fc7dd6c5e74e58e7 - src/mistralai/client/models/listdocumentsop.py: - id: 3e42bdc15383 - last_write_checksum: sha1:d9beade6d8bb8050a67e32c2a73926b140015e68 - pristine_git_object: 0f7c4584d793c7e692a4bbc6678e18549b0e0364 - src/mistralai/client/models/listfilesop.py: - id: e5bd46ac0145 - last_write_checksum: sha1:3e0bc8a7318ffd1c3fe15f335ea2bc1e18c714a1 - pristine_git_object: a9af5c70c98adce56653ff01772fe5900530a36e - src/mistralai/client/models/listfilesout.py: - id: ae5fa21b141c - last_write_checksum: sha1:4bc8ef424beb41c75d9c6fa4e101d330a951a99f - pristine_git_object: 460822f71fe8b0fc6292b804dc2a9de29bff4ef5 - src/mistralai/client/models/listfinetuningjobsop.py: - id: b77fe203b929 - last_write_checksum: sha1:af98423b166930cd18a1d377ea688540f3364166 - pristine_git_object: 8712c3fa6ac24094532fdfc047561997ea34552f - src/mistralai/client/models/listlibraryaccessesop.py: - id: 581b332626b7 - last_write_checksum: sha1:0a6bd277a706d807d87d3f2a4f870cc6ba917928 - pristine_git_object: 2206310f301f6ea40f14a495f5f6c6b4e76dbbf7 - src/mistralai/client/models/listlibraryout.py: - id: cb78c529e763 - last_write_checksum: sha1:3cd81fd6f6d2421c6b6d06077f0bf1d5b3c96cad - pristine_git_object: 39fa459f7cc7be17c751025287d7827c9d141aac + last_write_checksum: sha1:d01507ab0a1f6067cbc65aaba199de340ccc68aa + pristine_git_object: dfababa694305c96f98ddebf2f09e448e737c855 + src/mistralai/client/models/legacyjobmetadata.py: + id: 0330b8930f65 + last_write_checksum: sha1:3c2f669a05cc01227f62d6a8da1840d9c458d52f + pristine_git_object: 5757675895b3c56d8aa7c174deb08567e596ecf8 + src/mistralai/client/models/libraries_delete_v1op.py: + id: b2e8bbd19baa + last_write_checksum: sha1:ba41496bc99040f7598659c5b037b955b7f6d385 + pristine_git_object: 893ab53b11672edd9cde175e68a80d89ff949cb6 + src/mistralai/client/models/libraries_documents_delete_v1op.py: + id: 81eb34382a3d + last_write_checksum: sha1:66d1c6ec5e2535b0db72a3beac65b25a1f2336d7 + pristine_git_object: 0495832efba33314f3cd28fe62759c6dac5ca706 + src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py: + id: a7417ebd6040 + last_write_checksum: sha1:030ca9fb7e10396e6b743ee644fe1a734e1df1f0 + pristine_git_object: 186baaed8346d106272fea2e4826587634b061bc + src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py: + id: d4b7b47913ba + last_write_checksum: sha1:fdad7a6d3ae9a9c69009caf8207b284835675a9a + pristine_git_object: ebcf85d77ed6982d510ae95a6971e1d4b3ad56ca + src/mistralai/client/models/libraries_documents_get_status_v1op.py: + id: f314f73e909c + last_write_checksum: sha1:11d463eb328a1133658e8ff92340edc7f75923e4 + pristine_git_object: 1f4847874cdeff26caaf5fd16e0f8382834ecb2b + src/mistralai/client/models/libraries_documents_get_text_content_v1op.py: + id: 1ca4e0c41321 + last_write_checksum: sha1:26133a83bf0ef063c78069da1bbb96d58f44f30c + pristine_git_object: e0508d66fce682ed20a029604897137940689327 + 
src/mistralai/client/models/libraries_documents_get_v1op.py: + id: 26ff35f0c69d + last_write_checksum: sha1:e87e56e8fb9f7c11d61c805362db4755a81186b9 + pristine_git_object: 857dfbe60c57af8b0fa6655a049ed336d70fb941 + src/mistralai/client/models/libraries_documents_list_v1op.py: + id: 756f26de3cbe + last_write_checksum: sha1:5a1a9e025dc7a1fedaa5199d396a73c4986d4113 + pristine_git_object: da7d793b65139a3894b077a9665b392e8a44e8a2 + src/mistralai/client/models/libraries_documents_reprocess_v1op.py: + id: dbbeb02fc336 + last_write_checksum: sha1:bd5013cb1581dd13642ce7edf1e701f5b0c967c4 + pristine_git_object: a2f9ba2a0465fb3a8eb3b9afbb41d813de348656 + src/mistralai/client/models/libraries_documents_update_v1op.py: + id: 734ba6c19f5f + last_write_checksum: sha1:e12ca003680ff17523fe09438cd8f71d00ea081e + pristine_git_object: 7ad4231f72901b675d9af67c33364592c86be5ab + src/mistralai/client/models/libraries_documents_upload_v1op.py: + id: "744466971862" + last_write_checksum: sha1:9691ac41ecf986c9ccfad81423d367f96b10f4b7 + pristine_git_object: 388633d1c7e906803b711ef2bbf37656624515a9 + src/mistralai/client/models/libraries_get_v1op.py: + id: d493f39e7ebb + last_write_checksum: sha1:25b3c2c1040cd73ebd6b988b8b27708831affefd + pristine_git_object: 7a51d6053aa2cf2e6524a80487fe9549eec3dfa1 + src/mistralai/client/models/libraries_share_create_v1op.py: + id: feaacfd46dd3 + last_write_checksum: sha1:72e07fb60edbe1989865ba2ac90349edeb183f7e + pristine_git_object: 00ea74824b2efc4150d2e547e2eee416e5f6f2ee + src/mistralai/client/models/libraries_share_delete_v1op.py: + id: 7f3a679ca384 + last_write_checksum: sha1:897857c11cf0c14a0a81ef122dec4395dc16c0ce + pristine_git_object: eca3f86a6135e702f8cb6412a5f215dac2335a8f + src/mistralai/client/models/libraries_share_list_v1op.py: + id: 8f0af379bf1c + last_write_checksum: sha1:d27e0360c504576c315350fc226d371da455a598 + pristine_git_object: 895a259059283a17cc7558e3cc03022e2d4dd259 + src/mistralai/client/models/libraries_update_v1op.py: + id: 92c8d4132252 + last_write_checksum: sha1:a252f68e65cdb47e27d7059f256381daf2847344 + pristine_git_object: 54b0ab708c665ccb841b1c8d0f2748c390850506 + src/mistralai/client/models/library.py: + id: 028a34b08f9c + last_write_checksum: sha1:65f02f963a0540385681b88c7c7fba98d0d704f4 + pristine_git_object: 1953b6fbc6d7ad245ccacd9d665fb29853b00af7 + src/mistralai/client/models/listbatchjobsresponse.py: + id: 99d94c86a871 + last_write_checksum: sha1:7530be5f80a0756527be94758e800e8118e53210 + pristine_git_object: 35a348a1160dcf6d82d58c70cea07e11730359fb + src/mistralai/client/models/listdocumentsresponse.py: + id: f593d8e66833 + last_write_checksum: sha1:0d842168856056ff681b2a1c36b87df8e0d96570 + pristine_git_object: c48b8c051ad0d1fb4aed8396697e57e782be5a40 + src/mistralai/client/models/listfilesresponse.py: + id: 85d6d24c1a19 + last_write_checksum: sha1:caf901685bfb6f13d707b89726aaf6e5116cd054 + pristine_git_object: 10a60126600343033a4b0511d717cac6f1924b4d + src/mistralai/client/models/listfinetuningjobsresponse.py: + id: 118e05dbfbbd + last_write_checksum: sha1:f0582740a6777039e9695d97f072b5a3c34b483e + pristine_git_object: 1e434c5986bf577e2b42cca943cc6896a83d1fa2 + src/mistralai/client/models/listlibrariesresponse.py: + id: df556a618365 + last_write_checksum: sha1:55afb46b1fa797bc46574e5256cd063574c6fcbf + pristine_git_object: 337fe105731d8f3ced1f8f1299ff4081b9d5bfbe src/mistralai/client/models/listsharingout.py: id: ee708a7ccdad last_write_checksum: sha1:18e6501b00a566121dfd6a1ce7b0e23fef297e45 @@ -2422,208 +2414,192 @@ trackedFiles: 
pristine_git_object: a95098e01843fe3b4087319881967dc42c6e4fef src/mistralai/client/models/messageinputcontentchunks.py: id: 01025c12866a - last_write_checksum: sha1:9eab6d7734dcd4bf9da5222c1927f5f40ef45db0 - pristine_git_object: 63cf14e7fcbc7c3969220b4f07109473b246bf49 + last_write_checksum: sha1:6a0988d4e52aa2e9f7b09ae1e3266ecf9639c22b + pristine_git_object: 1e04ce24d62db6667129b35eb28dabcfd4135ea8 src/mistralai/client/models/messageinputentry.py: id: c0a4b5179095 - last_write_checksum: sha1:b1b8f5b78eb5f57f5cfa7163ed49101736bcefaa - pristine_git_object: 15046d25130cda6571f07a456c2b5a67d2a3bcc0 + last_write_checksum: sha1:b5bad18b88c0bfbbddfdafa6dc50a09e40a6ebd7 + pristine_git_object: c948a13e3cc2071dd1b3d11c419ea61d51470152 src/mistralai/client/models/messageoutputcontentchunks.py: id: 2ed248515035 - last_write_checksum: sha1:df4ef4d17ce48df271ff2b8cab297ae305aa08ec - pristine_git_object: def7a4d27cd3d1479864a1d6af19e89bd57bff70 + last_write_checksum: sha1:dc7456e44084cba9cc6a46553fd64b1eb25f8d77 + pristine_git_object: bf455d17db16e4bc11da0ebb105a9f6ad4d63c01 src/mistralai/client/models/messageoutputentry.py: id: a07577d2268d - last_write_checksum: sha1:0633b8c619883bedb1a6ad732c5487c7e7f817f9 - pristine_git_object: 8752fc36bfec39e0ab79d4593ae0cb43ea00641c + last_write_checksum: sha1:38ad03422407925087835ab888c0be40bf5fa7fa + pristine_git_object: 6a9c52ed59af1497577be2538e7141d57eea4c8f src/mistralai/client/models/messageoutputevent.py: id: a2bbf63615c6 - last_write_checksum: sha1:bbdb2c840a7a196edcb6ac6170e8273cc47a495e - pristine_git_object: 39c1013939ea238cb1c7ccbc05480a6840400061 - src/mistralai/client/models/metricout.py: - id: 92d33621dda7 - last_write_checksum: sha1:6198ba9e2cd66fcf7f9fcc1cf89481edd432cf11 - pristine_git_object: 5705c71283ce7d4a01d60752657f39279c0f1f85 - src/mistralai/client/models/mistralerror.py: - id: 68ffd8394c2e - last_write_checksum: sha1:8b867eca5ca81aa6364f13c9d7e42f9b0d855724 - pristine_git_object: 862a6be8294db5b30bb06cb7b85d60c52ed8e8c9 + last_write_checksum: sha1:c3317ab9279c499dd7fb26f45799ca9369676ac7 + pristine_git_object: d765f4fd3c4e43c37063833368e4b21cc0bfbcf2 + src/mistralai/client/models/metric.py: + id: c6a65acdd1a2 + last_write_checksum: sha1:5ef7c75b278f16b412b42889ff0f2fc19d87cb7d + pristine_git_object: 1413f589f7f23991a12c1367bc6f287b5e07d4a4 src/mistralai/client/models/mistralpromptmode.py: id: 95abc4ec799a last_write_checksum: sha1:a1417b987bb34daeb73ca4e015c085814e6c8ad2 pristine_git_object: 9b91323e7545d636308064085ca16fc554eac904 src/mistralai/client/models/modelcapabilities.py: id: 64d8a422ea29 - last_write_checksum: sha1:5bc65733cf1c2f4ee8e1b422636fda754bdf8afe - pristine_git_object: c329efbcd9be212c7428c09f28f897834c9239d3 + last_write_checksum: sha1:0f733a45f06cb2c603b47134d999a2de4c0a7bb0 + pristine_git_object: d9293ccc163995cfe0419d05c90fe1ae8e75cf57 src/mistralai/client/models/modelconversation.py: id: fea0a651f888 - last_write_checksum: sha1:6186e845be2717da6116e20072835c050d3fdaa5 - pristine_git_object: c0bacb7fd9cd052ecb31a72c6bf593504034e069 + last_write_checksum: sha1:4c1b31d95351dea877e24bd452b32d8e22edf42e + pristine_git_object: bb33d2e0e047bc075cb7ae284958b80a5b5ee657 src/mistralai/client/models/modellist.py: id: 00693c7eec60 - last_write_checksum: sha1:89695c6a680da571c7a77c4544607bd83b3a93d5 - pristine_git_object: c122122c38a3331337cc702340cf1d3e0c9ef99d + last_write_checksum: sha1:de62fc6787f482e5df0ff0e70415f493f177b9a1 + pristine_git_object: 5fd835f24cd1098a153ebfb3e958038a183d28a7 
src/mistralai/client/models/moderationobject.py: id: 132faad0549a - last_write_checksum: sha1:742d942d72b615432c066827b822290cf4d51d40 - pristine_git_object: 9aa4eb15d837ab2af97faa131a362d50a3a85482 + last_write_checksum: sha1:a8c1454a533e466216ef98dd198ae8959f51fa76 + pristine_git_object: e7ccd8f6f1f75704a973be7ebabc49617070c34a src/mistralai/client/models/moderationresponse.py: id: 06bab279cb31 last_write_checksum: sha1:b9158e575276c1e0a510c129347b9a98c5a70567 pristine_git_object: a8a8ec3d8d8a58deb3c1f8358c6dce5a9734f89c - src/mistralai/client/models/no_response_error.py: - id: 2849e0a482e2 - last_write_checksum: sha1:35b1651843a697024146d6377838b5b99c5c66d3 - pristine_git_object: 7705f1945567498ce606364490685a91b13cd8dd src/mistralai/client/models/ocrimageobject.py: id: 685faeb41a80 - last_write_checksum: sha1:663f11a19e067d424263eee40d8127cdc56fb72e - pristine_git_object: e95b67e17e51653bf194ad1cff3a926f34cf97c2 + last_write_checksum: sha1:13f4e4d33d8fb5b0ee842695d4cc8329bd7ca382 + pristine_git_object: 365f062b5674141aad4b1601a85bec7a56db4cff src/mistralai/client/models/ocrpagedimensions.py: id: 02f763afbc9f last_write_checksum: sha1:f572ed8992ba1ba4d53b705c4e8c94c85ae1290e pristine_git_object: 847205c6c74a621dd2ee6d9eb18d1acba8395c50 src/mistralai/client/models/ocrpageobject.py: id: 07a099f89487 - last_write_checksum: sha1:10cbd1dddcb1f1f5d530048130908ad0ce715928 - pristine_git_object: 4f4ccf43011fa2563f79bb70ae2a813b84f04074 + last_write_checksum: sha1:5089ac3f02e4225d6c95cc9f05b74013694536da + pristine_git_object: ffc7b3b615e17a8e0d76fea4081249b143d8fe4d src/mistralai/client/models/ocrrequest.py: id: 36f204c64074 - last_write_checksum: sha1:8e669292b846a5af4e3cee0b632524696e3067bc - pristine_git_object: 18b899dd5ecc162dc8e92622f56bed503fff80f7 + last_write_checksum: sha1:9e9009dace9ff36cbff0cb8de408a1e0585147a7 + pristine_git_object: 4ad337ced23b3bdad21785b8dc3fcadbb868d4f0 src/mistralai/client/models/ocrresponse.py: id: 2fdfc881ca56 - last_write_checksum: sha1:4a28dbfcc076c149e4f08a830d4d7f770836eb15 - pristine_git_object: 0a36e97500b4f62adac2526d7dd7cb85c9bdb8b8 + last_write_checksum: sha1:f1d18dbf4cd02f3598ae574d5033c30989fa6985 + pristine_git_object: e63eed987f4eb83f3406b15cf4d840fd43528a49 src/mistralai/client/models/ocrtableobject.py: id: d74dd0d2ddac - last_write_checksum: sha1:3116548673509f4e9f6a50d39f58ce3374823cc4 - pristine_git_object: e32ad894cd97546e635d12595051da103cde9fd8 + last_write_checksum: sha1:492f8e4c30b61330592768b13cffcf9a9eb2c0fa + pristine_git_object: 66bb050f30790c3fc51cdca1b73e847388fe50c5 src/mistralai/client/models/ocrusageinfo.py: id: 272b7e1785d5 - last_write_checksum: sha1:b8fb06d0dad22f958ac756e65d70f5ba410ad47a - pristine_git_object: a421d850450bb3f0b62853c931cd457434d2f174 + last_write_checksum: sha1:2b37766fdff72e7ec6e052f248362f7bb3989d2c + pristine_git_object: 2ec1322b29d7fe5246b9ad355a4997222b37970f src/mistralai/client/models/outputcontentchunks.py: id: 9ad9741f4975 - last_write_checksum: sha1:afb76f3af2952c2afab5397e348ddfd6dbb56c4f - pristine_git_object: 1a115fe8b4874a6bd86719d91332cd3db6d95b46 + last_write_checksum: sha1:16c43816ac7b7afd134bce1cda5bb44485d9fafe + pristine_git_object: fab7907b105cc9d9c738c5cca9c09eba9d5c4781 src/mistralai/client/models/paginationinfo.py: id: 48851e82d67e last_write_checksum: sha1:166961e2c0f573ba0677ee803820bb944a8a5efb pristine_git_object: 2b9dab6258249f7be87e1d4a73a2502e21fe1f0d src/mistralai/client/models/prediction.py: id: 1cc842a069a5 - last_write_checksum: 
sha1:ca391fc2f9faf1657392ceda952c2ee422121952 - pristine_git_object: 52f4adf1eb46d7d5679f9705871cd73e08ae8830 + last_write_checksum: sha1:3ee24375eb7f00cea0c9db6eebc564ce7067f295 + pristine_git_object: 0c6f4182ca8140e595f601b12fbd582034257587 src/mistralai/client/models/processingstatusout.py: id: 3df842c4140f last_write_checksum: sha1:007a476e4101cac4d2a9eef94d289f0f486d763a pristine_git_object: 3acadcc9792c286cd31031a80e108b74bc2c0c4e src/mistralai/client/models/realtimetranscriptionerror.py: id: 8c2267378f48 - last_write_checksum: sha1:b9642dd42c4092bdebe0a4f8d35c68152f259c05 - pristine_git_object: f8f2d3da9598ce0cd90d148ba1a9be0c5d6237cc + last_write_checksum: sha1:78637de61d6fc3bc1fff8e95c0a6f5ffc1a3e111 + pristine_git_object: c661e46100752119521f63045e8ebe79105ecc01 src/mistralai/client/models/realtimetranscriptionerrordetail.py: id: 5bd25cdf9c7a last_write_checksum: sha1:a226b10718b1fe4a661311cbd98ea3b1d1ac4163 pristine_git_object: cec1f6eabd44ceab4e58694a0862c9c90ea2f264 + src/mistralai/client/models/realtimetranscriptioninputaudioappend.py: + id: 8b03cde6e115 + last_write_checksum: sha1:abcf48a48b077e836e2ae5647d93bd61007b9aa7 + pristine_git_object: 8156a2704bd95b74875f7a9ac17191e026f08993 + src/mistralai/client/models/realtimetranscriptioninputaudioend.py: + id: c187ba1b551d + last_write_checksum: sha1:fa96156774481ca3b98f8c0f99b3b1db01280b37 + pristine_git_object: 473eedb744141faa3447929865a76129d5e96432 + src/mistralai/client/models/realtimetranscriptioninputaudioflush.py: + id: b27b600c310e + last_write_checksum: sha1:8a8eb7de4137cf8cd810d93d984009bf8dff51c4 + pristine_git_object: 553d14c7720b3d1388901989d8160f0e3318ba56 src/mistralai/client/models/realtimetranscriptionsession.py: id: 02517fa5411a - last_write_checksum: sha1:0073b248604f667e89e34cf01184a788ca84d63f - pristine_git_object: d20d0d8c94aeec425a2c1dfb93b72ac6878cb8af + last_write_checksum: sha1:eb9a23fb89e0bdb3bb6168f512488a98bd626bc1 + pristine_git_object: a74a457b1e54deb1fcd203ce5ff2c57691f16b18 src/mistralai/client/models/realtimetranscriptionsessioncreated.py: id: 4e3731f63a3c - last_write_checksum: sha1:d3fb5c5dc417a0ebb12a30770324674e055526ae - pristine_git_object: c4fa5774502699529e27870436ca65b9f88ccfe1 + last_write_checksum: sha1:6997848cf22dc90b10597eaf9f0dd966ace969af + pristine_git_object: bb96875ab913f3d6ff241a00d94a87e877637782 src/mistralai/client/models/realtimetranscriptionsessionupdated.py: id: 686dc4f2450f - last_write_checksum: sha1:7e4de1020672efc3503cda5b916b41056bf1d22b - pristine_git_object: a61fb05e8e5ba3ffa20bbb98bf61c17045c1f75c + last_write_checksum: sha1:e023fe0c8c54da644fc797c25dfeb070b6f0fd1c + pristine_git_object: fea5db4a1b956cb8253e4f147463c47958bfd989 + src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py: + id: 4e1b3fd7c5a3 + last_write_checksum: sha1:7da202e016b1d1dfc36a13ac03e3b419f0952cd2 + pristine_git_object: 07ad59a41f8a16b9c23c4e0be503a801ec0e2dd6 + src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py: + id: 7033fdb33ad4 + last_write_checksum: sha1:812f072a9effe1ce44e56094121ed10b3a83e39d + pristine_git_object: a89441e91dff4b7a78e8dd247b43243e89bf129d src/mistralai/client/models/referencechunk.py: id: 921acd3a224a - last_write_checksum: sha1:0dcff62499afdb1db0fd4f46614f8680f94837f4 - pristine_git_object: 7634d8ae07c96a99e634dcf888077f1d8cc4dc67 - src/mistralai/client/models/reprocessdocumentop.py: - id: b2913a7aa5c9 - last_write_checksum: sha1:07174ee58ec12909f08a08a9a6d7427ee9b2d5d0 - pristine_git_object: 
48a4b72bf285e2f2e4b2d0c352ebc463518ed712 + last_write_checksum: sha1:a8bff06a2a040556bce8e6212973a774bee6bd34 + pristine_git_object: e0bbae4e08275e82010080d4ee84612e01a07f81 src/mistralai/client/models/requestsource.py: id: 3f2774d9e609 last_write_checksum: sha1:1ce68530a46793968f1122d29df722f0a5c9d267 pristine_git_object: fc4433cb4e657b06aa6a4c078094c2df342810e2 src/mistralai/client/models/responsedoneevent.py: id: cf8a686bf82c - last_write_checksum: sha1:376c2a65f1fcdfe20d7cf0bd6aa6d8870a4f32c1 - pristine_git_object: ed331ff12c8728290b8ad17e52d9384265233665 + last_write_checksum: sha1:144a8bf407391948946f3f5362db78a33c45ee6c + pristine_git_object: be38fba81c08088303c4342c99ac3628c5957785 src/mistralai/client/models/responseerrorevent.py: id: b286d74e8724 - last_write_checksum: sha1:ecff834ec62bf46d2aa5d9753f3898ed86caad45 - pristine_git_object: 8f196a52b469458da5c9f072983870da8c4fc4ea + last_write_checksum: sha1:df3f53344624082471c795131552689510946345 + pristine_git_object: fa4d0d01c1cb7f15d6f469279c2000d2fad8f459 src/mistralai/client/models/responseformat.py: id: 6ab8bc8d22c0 - last_write_checksum: sha1:e0c29239b4cd698af50412a1cab85217ccbb1796 - pristine_git_object: 409b80d658e4c93f4ee25c218fe74d65fd84ad44 + last_write_checksum: sha1:0ab455566c6bb0b63e2cb1f61f300266021f5954 + pristine_git_object: b2971412549cc5b95c53b93425dbd5b6503a4df7 src/mistralai/client/models/responseformats.py: id: c4462a05fb08 last_write_checksum: sha1:3cb82d44a4f9df5e9a3f51867be6eab1d439d87a pristine_git_object: 21345778ad2d41a3746292e67fec628f9ec2a84d src/mistralai/client/models/responsestartedevent.py: id: 24f54ee8b0f2 - last_write_checksum: sha1:8be1513409934d7ea1c524e468954f7eda0a8c62 - pristine_git_object: 256d2a6c864edf4f3ccd77b2db139c11fe4f6727 - src/mistralai/client/models/responsevalidationerror.py: - id: c244a88981e0 - last_write_checksum: sha1:74a39321dee69f3487d9b9e01ffb2e40715176f4 - pristine_git_object: 1ed0d55266a106364fe58aa1e476fafbfbbbfdfe - src/mistralai/client/models/restartconversationop.py: - id: 2f6f3e4bbfd8 - last_write_checksum: sha1:9500d3ebea040ff4a203f3f025ff1bff8a397235 - pristine_git_object: b09eaed5bc8ecdbb7f1952c97b2e665462c70f9e - src/mistralai/client/models/restartconversationstreamop.py: - id: 16dc9ee5bf22 - last_write_checksum: sha1:b16f54529f4fd7d1422c82ff1a6dd5a9a82ba6bd - pristine_git_object: 3b2025f536d1c54ed58064b4be33aaafb9297ac4 - src/mistralai/client/models/retrievefileop.py: - id: ee73efdf9180 - last_write_checksum: sha1:330ec0a78a7ba623f21cd378b53250045bea984c - pristine_git_object: edd50e571cf56c6c22acc1777f6c9af38787f07d - src/mistralai/client/models/retrievefileout.py: - id: 8bb5859aa0d0 - last_write_checksum: sha1:1077bdb8fcc5ba22b2deb7f5c95fefe7b1fb82f5 - pristine_git_object: 2abf2161cd61d84f04836740a526c0e3711c3f6d - src/mistralai/client/models/retrievemodelop.py: - id: d883baa79c9e - last_write_checksum: sha1:525c7e9cf8594433cbb21374422067a75e6b53a9 - pristine_git_object: b4334e9a5541a14f7916244761213b883d507a41 + last_write_checksum: sha1:f66a0a67444916e838ca9a63144fb661832b54b9 + pristine_git_object: 84abfcd9ac159b9bd9234ff015d5525d88d663f6 + src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: + id: 6fefa90ca351 + last_write_checksum: sha1:52775e73fb5c51d245362ce63672cec776e5b6bd + pristine_git_object: cd5955c1eadb8cd9d1f9ecc388c2cc17df11c885 src/mistralai/client/models/sampletype.py: id: a9309422fed7 last_write_checksum: sha1:86a61340a647696f6c35a82d945509b1c85aa6f7 pristine_git_object: dfec7cce1e22ab607b6a9e947fa940284426086d - 
src/mistralai/client/models/sdkerror.py: - id: 12f991dad510 - last_write_checksum: sha1:c2c344c8b7e23b0c93eeafedd25d28582467c3a7 - pristine_git_object: 101e1e6a67c3672e899b39dbfe10d45550a4449a src/mistralai/client/models/security.py: id: c2ca0e2a36b7 - last_write_checksum: sha1:cec2a544790c2178f92742ac88e546efeacedb40 - pristine_git_object: 4fa8b4b2651f1d13811faf2da6e481243ea84e5a + last_write_checksum: sha1:d74333517caae2a1aa58517e8e935e46913bcc66 + pristine_git_object: f3b3423e850a1afa3b0fa5fa6c94f6018ff70627 src/mistralai/client/models/shareenum.py: id: a0e2a7a16bf8 last_write_checksum: sha1:15a84d57ceeb74cfb37275f714954e42d8e9b3ba pristine_git_object: 08ffeb7e46fbbc28b7c93ef2aa4a49aff7c0d35e src/mistralai/client/models/sharingdelete.py: id: f5ecce372e06 - last_write_checksum: sha1:c5e4e6df47ef2d5715a99533a1efd936f0e7e16e - pristine_git_object: 202732cf785074446cd24360dd9c540768e4134f + last_write_checksum: sha1:247d793bd1ddc0ad35d010c17e5b32eba826e3a1 + pristine_git_object: 33ccd7e71b8f65d2a9329d8632b5446ca0431d0a src/mistralai/client/models/sharingin.py: id: e953dda09c02 - last_write_checksum: sha1:f60bd60d37f0accadf50ea111055fd99aa190a5f - pristine_git_object: 8cc3e8968d9d5460f040ebdb66d8f460e86d2c96 + last_write_checksum: sha1:7c2b5333c634ed7889fc907edbf89c6066db5928 + pristine_git_object: 7c1a52b049db4afbd6a06b5f39966dbec4f862ba src/mistralai/client/models/sharingout.py: id: 0b8804effb5c - last_write_checksum: sha1:362bda8a5bd70d12e2de33814d3bd36a61c6d7ae - pristine_git_object: 778071546c12c2636d2deec6042e6b686b6428c6 + last_write_checksum: sha1:a78e4f6bf2f49ae8250787e1680b5004563b32ac + pristine_git_object: ab3679a4cbcc2826ff2672a09e4eaf4990b5c6a9 src/mistralai/client/models/source.py: id: fcee60a4ea0d last_write_checksum: sha1:4d4277d75f7ce001780a069898b38afa7c8addc0 @@ -2632,214 +2608,190 @@ trackedFiles: id: 1733e4765106 last_write_checksum: sha1:3c79fc7c43cd018fba4950ba013ed15899b82ebf pristine_git_object: 0add960bc93f53df5ddda94892543a0857f32dd6 - src/mistralai/client/models/startfinetuningjobop.py: - id: "663886392468" - last_write_checksum: sha1:6a6a409dd481359e8d6593fa2ea817007f8a967d - pristine_git_object: 805a8721cc7d048f172e1096ead0e410c7d04928 src/mistralai/client/models/systemmessage.py: id: 500ef6e85ba1 - last_write_checksum: sha1:af68936119bf7c067aec5215e2654c56a5df1755 - pristine_git_object: 352eca76ad5051cc2c504c673a23e048642fe018 + last_write_checksum: sha1:a88de3fc70adab47943f867336659b3a1a6cdae0 + pristine_git_object: 2602cd2db03cd129b42b343f2dc79ce68106ac35 src/mistralai/client/models/systemmessagecontentchunks.py: id: 297e8905d5af last_write_checksum: sha1:e5695ca0ebdb0f02f3a0c527015df154a0c52b7f pristine_git_object: d480a219e935aaea91adc320de0003b562c0bbb5 src/mistralai/client/models/textchunk.py: id: 9c96fb86a9ab - last_write_checksum: sha1:4ad624afaf4d83d4e58f72bcbd15b9faecc703f3 - pristine_git_object: c0584234da572bb903894633b123b1dda29e7736 + last_write_checksum: sha1:89cbb66753d7a3585ce58c70219a349f770909cc + pristine_git_object: ac9f3137dddc15e1cd10aa6385b76510e6c23e33 src/mistralai/client/models/thinkchunk.py: id: 294bfce193a4 - last_write_checksum: sha1:d9c779959ed82ae3de66e481536d80bcc2ed57a5 - pristine_git_object: a999f5d7b824325085ec980cfa07294919408538 + last_write_checksum: sha1:9126c530e93ae7532235d4bfa3e2b202423a0f24 + pristine_git_object: 5995e6010bfb63d0ab2ded6e0f55b7dca23f769a src/mistralai/client/models/timestampgranularity.py: id: 68ddf8d702ea last_write_checksum: sha1:64e7b198a75f026590e26758112651d31984076f pristine_git_object: 
8d3773752444db865c0e2629ad9eed66eb7f2bc6 src/mistralai/client/models/tool.py: id: 48b4f6f50fe9 - last_write_checksum: sha1:14e7b21a2857e2ca36830730a47d0eca476fb491 - pristine_git_object: a46d31f166618fd5b92b7e76ccb9190796af7cd2 + last_write_checksum: sha1:7e33d7a0349e652b40926f6a51240b9a5c1a7dbd + pristine_git_object: 2b9965e571eeb494f8cf867818aab488198ecdb2 src/mistralai/client/models/toolcall.py: id: fb34a1a3f3c2 - last_write_checksum: sha1:15ed0a4611e8c310640ec4622af8019d2db93355 - pristine_git_object: 4a05bbd04a44446babda8419dcf4d4c93248fe41 + last_write_checksum: sha1:7d0275444dd6be291c091e908a2b7f2fc536f20f + pristine_git_object: 181cec33c904535c804de06c7357bd493647cd70 + src/mistralai/client/models/toolcallconfirmation.py: + id: f2e953cfb4fe + last_write_checksum: sha1:554a2e073917ffb479efe5887c0b59a2f4967c6e + pristine_git_object: fd6eca50a7ec2f4cca2ae20958717881660e0ac5 src/mistralai/client/models/toolchoice.py: id: 14f7e4cc35b6 - last_write_checksum: sha1:358a6e88486b4d372c9041dd15c0206b119bbc32 - pristine_git_object: aa2016fb63290c63f9b8f3e18c552f6598f15c8f + last_write_checksum: sha1:a787827a4f4ecf5b6a7068ba94fd1ff074898b51 + pristine_git_object: cb787df1b62190319c6e9679521228af28ee7204 src/mistralai/client/models/toolchoiceenum.py: id: c7798801f860 last_write_checksum: sha1:5388b2a6fad842f8e4ae79e6257b4d14c122a6ff pristine_git_object: d66c3d07058eb87bcc3eec10de99a616b5f6638a + src/mistralai/client/models/toolconfiguration.py: + id: faec24b75066 + last_write_checksum: sha1:912c1c10e88053ae4ee44af763c9ab7c95339f5d + pristine_git_object: b903c8b6c13777b671faf5aa97994117734b3a8f src/mistralai/client/models/toolexecutiondeltaevent.py: id: df8f17cf3e07 - last_write_checksum: sha1:6ad6e219f3d7512c9fd382fb22471bfaa0fc9b09 - pristine_git_object: 384ec2407848f51434ca378ad7de965c584b163b + last_write_checksum: sha1:2537a6e2dffde3760a064fdf92efa6cdc117ba2b + pristine_git_object: 5a977ca6fc5bfdeadd929f18037fb5c9a9582b40 src/mistralai/client/models/toolexecutiondoneevent.py: id: 514fdee7d99f - last_write_checksum: sha1:09ef4842c50419eda746f3361454c4df0c3c2466 - pristine_git_object: 56f28899b8b4161fcddfec0ed2610486fe6f8b06 + last_write_checksum: sha1:d62f57105e4816e03030bc9a2a5645482ea80c55 + pristine_git_object: 1c9b0ec92d87a8559ef050a21ba309e05f6b0314 src/mistralai/client/models/toolexecutionentry.py: id: 76db69eebe41 - last_write_checksum: sha1:ff84f62c5264aa023f412956cf83604ecc4112a9 - pristine_git_object: 158cbf06a2acdd492ddb91ae8eaca4802da9f359 + last_write_checksum: sha1:9a697fdad4178b95d7d1bd1eaee77ef948fb2d4f + pristine_git_object: 0d6f2a1305f262519ba719969c6e62ceb95e52b3 src/mistralai/client/models/toolexecutionstartedevent.py: id: 40fadb8e49a1 - last_write_checksum: sha1:5ba46ca1583e8245736a0ae81372025482a8504b - pristine_git_object: 1591866981ce1439fbce3736f028b15205d95810 + last_write_checksum: sha1:9f6e43d5b2c807ca3b080ea7bd4878ba3ec2a788 + pristine_git_object: 21e5bfa8fea7fa27b7031b740f72a873760700cc src/mistralai/client/models/toolfilechunk.py: id: 26c8aadf416a - last_write_checksum: sha1:1dd468876a2ff5ec8b15b6f4e6b8f812e640a29a - pristine_git_object: 6eebd562b1707b41b81e2fd0e267e4c8698551de + last_write_checksum: sha1:89bb203aa600bf6a516fbe10e1787a132de9ca5a + pristine_git_object: 0708b3ff4c4f97a0e4c4359baeedc89ef0b10278 src/mistralai/client/models/toolmessage.py: id: 15f1af161031 - last_write_checksum: sha1:809936ebaeb4541f862aed6d26e1d1f5ff0ae58a - pristine_git_object: b3e8ffd9294bf6b0b46b26097abb87a5b96c9302 + last_write_checksum: 
sha1:cfa16352cf5bbcd6eedbfbf7f3002149fd989418 + pristine_git_object: 05a0ee636a4393e3ce65cc1b6e272ddf8ec79254 src/mistralai/client/models/toolreferencechunk.py: id: 822e9f3e70de - last_write_checksum: sha1:f02c38c892580a6287156551e7964c601a239220 - pristine_git_object: 3c76c8c2dcc86d225c5218fa13cd43a693230fa8 + last_write_checksum: sha1:f5c9265e27fa2d4526e5ce50dff7f7bd641eb642 + pristine_git_object: 95454fe891dd3955121565431897c1b8f0c25083 src/mistralai/client/models/tooltypes.py: id: 86c3b54272fd last_write_checksum: sha1:e90c15c1e645a5f207af0c7ac728cb0a521c6706 pristine_git_object: e601c1967c42ef8d0c2eea98bc5c0ca722cde066 src/mistralai/client/models/trainingfile.py: id: 2edf9bce227d - last_write_checksum: sha1:668f05a3e3b883c2f54b1e541f1fb501605456b0 - pristine_git_object: 1f710ff81c046261ea497505d7216a1208c65d5b + last_write_checksum: sha1:8fd6a2560554b3c2166daff2ff1a48bb49053489 + pristine_git_object: 2faeda8bfb38c810c5d80eb17cc9928c49c7caf5 src/mistralai/client/models/transcriptionresponse.py: id: 60896dbc6345 - last_write_checksum: sha1:3e5c20911697f5569029932fe0910da94feb2b06 - pristine_git_object: 786863ec331a4bdca18ac056f7447d11010d4320 + last_write_checksum: sha1:e8a318798dfe4ebd64c9d64f487f7e3e8dd05532 + pristine_git_object: 70315463ff8e01c680aa80d68bdc32a7429ddb16 src/mistralai/client/models/transcriptionsegmentchunk.py: id: d1e6f3bdc74b - last_write_checksum: sha1:0107b6ee9160cd2a8309f7c8465502d7d0be90a8 - pristine_git_object: c78bec3068b95782bdc271c2e1ee645b115fed32 + last_write_checksum: sha1:ee56c437444cbfa7983ba950e3e166f392d208cb + pristine_git_object: b87bfc2f9de0a07d62e8cc1fe265a9c29f56f194 src/mistralai/client/models/transcriptionstreamdone.py: id: 066a9158ed09 - last_write_checksum: sha1:3a6abc6f1a0ad78d73e32f3d40ef4bb425aee5b5 - pristine_git_object: b5740b3bb62b4db3846b7727b15e18502e39d862 + last_write_checksum: sha1:cb8ea2e34c712ef1694bd1b6a83e7eed9318b13b + pristine_git_object: e3c5016901a2400c222e5b821b5afb312af1a1e6 src/mistralai/client/models/transcriptionstreamevents.py: id: b50b3d74f16f - last_write_checksum: sha1:f688a18317bd048ad89881c35cb80e39bb7cba47 - pristine_git_object: 17161a177721e44a40903cf15bf08ad0b56545de + last_write_checksum: sha1:68f82eea8a0bcf1b8b65cedf9e276f34121d398b + pristine_git_object: 073fd99aebf6f90027a45c8ee4daa7ffeb8ee34e src/mistralai/client/models/transcriptionstreameventtypes.py: id: 6f71f6fbf4c5 last_write_checksum: sha1:1d568460b1521f17dd5e551632ae4d7883a98dd3 pristine_git_object: c74bbb7483cc3981ee3638c80c15924f3e1c20c4 src/mistralai/client/models/transcriptionstreamlanguage.py: id: e94333e4bc27 - last_write_checksum: sha1:17c7b082ebf5764e21f124fe4c6a6ee5cea4fc51 - pristine_git_object: 67b3e9791efaf134580d82c2a12fab1cd33efbb1 + last_write_checksum: sha1:d1ee93b09ca377bc29845924d53db3ccf250269d + pristine_git_object: b6c6190684eccdc3fe6ce4bc7b86f5ee6490a197 src/mistralai/client/models/transcriptionstreamsegmentdelta.py: id: c0a882ce57e5 - last_write_checksum: sha1:12cbfcf02d5cb4979a836e429690786153250bf0 - pristine_git_object: 8db5e73619eab98c3751689a7ec5bef45ef9ef6b + last_write_checksum: sha1:3507a0355027136e92ada0c9766277381d5dee96 + pristine_git_object: 32ef8f9b2aa34253ea10c830ae856a931306f658 src/mistralai/client/models/transcriptionstreamtextdelta.py: id: 6086dc081147 - last_write_checksum: sha1:6b371b5d236e6e767f25160ab0e8a49bcaf356f8 - pristine_git_object: 49338a083332467e64f171637ca04365ca6bf25b - src/mistralai/client/models/unarchiveftmodelout.py: - id: 9dbc3bfb71ed - last_write_checksum: 
sha1:40a23dc39af81f06b23f21dad45c5c5f1178b2af - pristine_git_object: 0249a69e8552ed00a5e1f505fdc16025c46d2477 - src/mistralai/client/models/unarchivemodelop.py: - id: eb18584fd78c - last_write_checksum: sha1:5b81357950f301a82233b58a3e2a5b232fdbf546 - pristine_git_object: 1d68a06ae41559baffb6d54398b52dec630556c7 - src/mistralai/client/models/updateagentop.py: - id: ae3a6abea468 - last_write_checksum: sha1:3867948bd0ea37b597c4e5ef7a2e6881791a5fa5 - pristine_git_object: 28acc83d8df1373e897f9634dfbb84ee28897717 - src/mistralai/client/models/updateagentversionop.py: - id: 3821dca5b20a - last_write_checksum: sha1:4c41a450278858089c7cb23b8fcf1e4184fa1f1d - pristine_git_object: 114013bcdcfb7d7c9e935285f167a004b65fbd09 - src/mistralai/client/models/updatedocumentop.py: - id: eee9ef317180 - last_write_checksum: sha1:7d9fc6e18e6631cfe9cd1bc2fa5f7d6cd599ec19 - pristine_git_object: 073f22a9a28c18ad645212262bdc66528a1f6281 - src/mistralai/client/models/updateftmodelin.py: - id: 39e2d678e651 - last_write_checksum: sha1:4ea30ed8eaad36e1440614016f075f088c7e5781 - pristine_git_object: 4ac5a8a24026f6a975044de01a9918364aa64e04 - src/mistralai/client/models/updatelibraryop.py: - id: 4ba7acdb62c6 - last_write_checksum: sha1:3816c8eff226634b545843eed2d0c15fa1579308 - pristine_git_object: c5a1ad30e9bfc277cbbcdea0218a265ad10bcb96 - src/mistralai/client/models/updatemodelop.py: - id: ba149ecfe03e - last_write_checksum: sha1:2ce33ac60846a5ef70141dccbdb09950c3d1e348 - pristine_git_object: 023be97905929aa2925f20cd69b3591e6e3168d7 - src/mistralai/client/models/updateorcreatelibraryaccessop.py: - id: ec9b15418f5c - last_write_checksum: sha1:82fe6bbbb1402f03b7c0380c5fd84a8fef9bec67 - pristine_git_object: 1abe6eda3eb7d0aff8a7c146c848a63e299cedf1 - src/mistralai/client/models/uploaddocumentop.py: - id: 0018fe7ff48c - last_write_checksum: sha1:f31d565f419cbcc59af0655753cee6c643ad307a - pristine_git_object: 2c957947830ae0d467084cc6502d9d97ffdf6c81 - src/mistralai/client/models/uploadfileop.py: - id: d67619670938 - last_write_checksum: sha1:00664ba8af70ffc96871eee02890411475ca6c37 - pristine_git_object: 50848f0b663f60f9a303010f3c940919939949c9 - src/mistralai/client/models/uploadfileout.py: - id: 42466f2bebfb - last_write_checksum: sha1:44d0e5d419fb82c56c33c0f9af8902b3cc06bf6d - pristine_git_object: be291efb523965c155fc922d896da2cf682378ab + last_write_checksum: sha1:968b4bc32731be6c63be3fd90eb26f4357f891a3 + pristine_git_object: 42f0ffb7f16bee4f68f9db9807aa4ec3d9ae5176 + src/mistralai/client/models/unarchivemodelresponse.py: + id: 22e2ccbb0c80 + last_write_checksum: sha1:a69d8dc8636f3326eb61892b85a9b60044b457fe + pristine_git_object: 5c75d30edaade853f085533da0f9f5de221b6e44 + src/mistralai/client/models/updateagentrequest.py: + id: 914b4b2be67a + last_write_checksum: sha1:f37178288254e905ce298befbe801fa6ba63ec0e + pristine_git_object: b751ff74396ca0e74411a7a1549c6e0b4988fc49 + src/mistralai/client/models/updatedocumentrequest.py: + id: a8cfda07d337 + last_write_checksum: sha1:c644725ae379f22550d00b42baefb511d1cc3667 + pristine_git_object: 61e696555c0654208b0d9dcd63fc475ad85297d4 + src/mistralai/client/models/updatelibraryrequest.py: + id: 51bc63885337 + last_write_checksum: sha1:622d6a7af58d2e86d7d2dd4e312883d11ce5a8a8 + pristine_git_object: 91cbf2a1c76361c9c5ee1554c80f1507ff5ee50b + src/mistralai/client/models/updatemodelrequest.py: + id: fe649967751e + last_write_checksum: sha1:dbba8a6ccbfae36ac56808742f4c05ab99dd2c6c + pristine_git_object: f685cfcce1aa3669159fec902ba78034ef3141b8 src/mistralai/client/models/usageinfo.py: 
id: 54adb9a3af16 - last_write_checksum: sha1:fcfdc921bbcc78436ef156dd7a2eff1123c4036f - pristine_git_object: e78f92e75f86fd593469f7267aad72e417178161 + last_write_checksum: sha1:04705526057c43495284fe9c50cf7df2af7b49fd + pristine_git_object: 31cbf07e3e38df4452da320e44f3fa9aef17c196 src/mistralai/client/models/usermessage.py: id: cb583483acf4 - last_write_checksum: sha1:215406ca76602e899620ef763e216d71f8cd9fcd - pristine_git_object: 25ccdf805e9fbc65da7b6d0051f13224cf0e04fa + last_write_checksum: sha1:0060ee5f5fbbd78073cd56546127a021354a8072 + pristine_git_object: 63e7679246a11fe8e7a3db06e382779c05c64366 src/mistralai/client/models/validationerror.py: id: 15df3c7368ab last_write_checksum: sha1:63df5739d68f984470d4d1b8661a875201cc301d pristine_git_object: 385714c8cb80a8afbca6d5142a2d378d0d165cf9 src/mistralai/client/models/wandbintegration.py: id: 4823c1e80942 - last_write_checksum: sha1:6391a293368ba6fa98114ce510a7665b47d82721 - pristine_git_object: c5db4a6d409f1d84d356a471995119a070db627a - src/mistralai/client/models/wandbintegrationout.py: - id: 6b103d74195c - last_write_checksum: sha1:37caaf5224b216826c48912538959baa0a7d997a - pristine_git_object: d0a09bf48c3a24f5382a626d26897afe2d680f7e + last_write_checksum: sha1:cc0a7ce49756928f4d261375526a3498b9e4f05d + pristine_git_object: f0df2c77845b2741802730fcd4f3c5d31b7ddd8e + src/mistralai/client/models/wandbintegrationresult.py: + id: 8787b4ad5458 + last_write_checksum: sha1:6ba506e01333a3084f63fbfccb459235b6560554 + pristine_git_object: 575cbd42297f02a54542c7eda3a4cabaa28dda23 src/mistralai/client/models/websearchpremiumtool.py: id: bfe88af887e3 - last_write_checksum: sha1:9f9b4bfeea780cec16b9457ee800524c3eba7a4b - pristine_git_object: 9588ab1d7361d3ab1cba2f16e74695273cc03557 + last_write_checksum: sha1:ceb073d3b3916b2ff8f7b7e5eb01692893024d68 + pristine_git_object: 00d4a4b427331660d29513ec43e68fc7cf8afcfb src/mistralai/client/models/websearchtool.py: id: 26b0903423e5 - last_write_checksum: sha1:9afaf3738be10d0a401b34e15db25612ee33465f - pristine_git_object: 27502909ea608f8e0b4a71484da94d26209e0c07 + last_write_checksum: sha1:a07d7ace2d68c944c686e69053bef8d84231814b + pristine_git_object: 6871080f6279ef42a0525c1e26368baafc98fbb7 src/mistralai/client/models_.py: id: 1d277958a843 - last_write_checksum: sha1:f50e7b7194f97de4abf0afd70b5e1c52b805cef6 - pristine_git_object: 05b33ac72da14401b700c4abfb28ca33b5af702b + last_write_checksum: sha1:b9ea906a7704aa57efe5d13ac547e502d961d3b5 + pristine_git_object: a287c413ddf48bd5ff7fc0a685e05d4bcdabb6e5 src/mistralai/client/ocr.py: id: 2f804a12fc62 - last_write_checksum: sha1:2cfde7a27733502b87690c1025adefe5b717da57 - pristine_git_object: 2aa382295a9f1561021a36f3a68a9fb505cfe536 + last_write_checksum: sha1:707d91582149e76a3109df8b1a58bfd44111a93d + pristine_git_object: a46119d1577036be57896a7ea3737ab508497e4f src/mistralai/client/py.typed: id: d95cd1565e33 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 src/mistralai/client/sdk.py: id: 48edbcb38d7e - last_write_checksum: sha1:be11dc3f70c773dd5c6baba6b3fafd996c5baec2 - pristine_git_object: b1ab54935a3421008c78f4864bd6097c0a098040 + last_write_checksum: sha1:365709e35dc4e450a2c4931e75dcbd04568ab361 + pristine_git_object: 80bf25a749eb3b36035aaafa15f059bcf403ec80 src/mistralai/client/sdkconfiguration.py: id: b7dd68a0235e last_write_checksum: sha1:c6944f12c6fdc992d43db943b24c8c90854cde5e pristine_git_object: 712e92e05c7fd3016431ec62ecb7b7789c8b7071 
src/mistralai/client/transcriptions.py: id: 75b45780c978 - last_write_checksum: sha1:b47a3765f2191715fc19bdbc4e56414abbe59f4b - pristine_git_object: f7ef5b0a0769467bd4bea61f7b0dca3b68c3788d + last_write_checksum: sha1:27a5b7dd6ed47b0f79b95fbb8599d439512ef344 + pristine_git_object: 7f01917d6e462cff9af75e70d32afbcc5958c7de src/mistralai/client/types/__init__.py: id: 000b943f821c last_write_checksum: sha1:12a4ace69cbc63f1125eeddf901afed7cdf378b0 @@ -2850,8 +2802,8 @@ trackedFiles: pristine_git_object: 4e889aa0ffbb4402e416a40fa6259334cb0a3c5c src/mistralai/client/utils/__init__.py: id: b69505f4b269 - last_write_checksum: sha1:adb457b85659a04945857a74407306dafbdce7cb - pristine_git_object: 7ed3a42095b5921adf0e154ae6eba560a1098233 + last_write_checksum: sha1:98698da73839db7c258fd1afd45ccacff86c64be + pristine_git_object: 4bde281a1fd8c616d4b3529af0fcb79f57374310 src/mistralai/client/utils/annotations.py: id: 1ffdedfc66a2 last_write_checksum: sha1:f86ba37de752e63076f25d53f9c54fce98d2a0bd @@ -2860,18 +2812,22 @@ trackedFiles: id: c40066d868c9 last_write_checksum: sha1:412ca432d6f5a75b692a967bc6fc52e4f4eff7d5 pristine_git_object: a2c94fac73ecbfb8acd8ed4f75692318e4f863ec + src/mistralai/client/utils/dynamic_imports.py: + id: ac9918d925c0 + last_write_checksum: sha1:93d3eac90a47a039e7a652ae120bec66be6c681a + pristine_git_object: 969f2fc71178ed2114640c8f0831f4f3acb25af8 src/mistralai/client/utils/enums.py: id: a0735873b5ac last_write_checksum: sha1:fe05b6a21360b0eff1fc246e9a3ee01758521262 pristine_git_object: d897495f053459106144501c67f2215251d52a27 src/mistralai/client/utils/eventstreaming.py: id: 3263d7502030 - last_write_checksum: sha1:0e15051d74262fbe051e1ba83fd1f2c0c0a016a0 - pristine_git_object: 3fe3c7e13509d6fab08fbb8504c6c5f674c2b679 + last_write_checksum: sha1:24af3168dafe6b8d860cffb121fac11cd0e9d930 + pristine_git_object: 19a121529f180968f655baffbe446e5c1d6c2abb src/mistralai/client/utils/forms.py: id: 58842e905fce - last_write_checksum: sha1:c7929d974f46629b56e740456ddf03230b4048ab - pristine_git_object: 2b474b9a719e95c4bcae8572e5569e64f8d0b77f + last_write_checksum: sha1:d68ca0257e0e8bdc5cdc450f3e70a7ba789859f5 + pristine_git_object: 6facec5386675ccd5a26ff6093f98436a62fdf6b src/mistralai/client/utils/headers.py: id: 9066de2ead8b last_write_checksum: sha1:bcd2f47b96bfaa54b3590c557a9267142d446be6 @@ -2894,20 +2850,24 @@ trackedFiles: pristine_git_object: 3aae69c7cf618776daec8bd46f9116b06c25e837 src/mistralai/client/utils/retries.py: id: 5f1a5b90423c - last_write_checksum: sha1:94a86f31092553d4640a54c446cfe9028b4fb6ef - pristine_git_object: 90c008b0e20c1a539d65ffb387fb61a724c3c111 + last_write_checksum: sha1:bbf8e376c1c801911e65e33566d3a142f46133f9 + pristine_git_object: bea1304150e77ca06185efb7db7798aaacd5e623 src/mistralai/client/utils/security.py: id: 1acb7c006265 - last_write_checksum: sha1:e8543609e699dab330a4768786883c6ca38f07a6 - pristine_git_object: 4c73806d9c8e54a2a4cfe8f62d8c281177789f6f + last_write_checksum: sha1:3981f6571daf28b3b553beb09a4ebeeeb6ceff14 + pristine_git_object: d8b9d8fe746babd0a87846812b1f4117d1a46de2 src/mistralai/client/utils/serializers.py: id: 53c57c7f29a8 last_write_checksum: sha1:8a3a15cf273034261111f2559cacbb579e17cb1b pristine_git_object: fbc2772dc4284775be92de6a086c1eade9376417 + src/mistralai/client/utils/unions.py: + id: d23713342634 + last_write_checksum: sha1:f814d757474f039199f501aa53cdfba97a8c6645 + pristine_git_object: 14ef1bd5c5abef9bd5f2a3a4ee2f79e954c67e7e src/mistralai/client/utils/unmarshal_json_response.py: id: b13585fc5626 - 
last_write_checksum: sha1:c0c44d0a656477daa225724e88a7cf5c954a1df6 - pristine_git_object: 65190e5c1d70a31f51656e1644bb701b9f132bcd + last_write_checksum: sha1:372a01f5abf034ddbe5d4a3fc68e9e397f86085a + pristine_git_object: 624433c4dd42c9fb1bfae363becc76c62e390e14 src/mistralai/client/utils/url.py: id: 3c6496c17510 last_write_checksum: sha1:c64be472d29cf229f2b91102808dcb741371c227 @@ -3024,7 +2984,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + application/json: {"stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -3056,7 +3016,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + application/json: {"stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -3075,7 +3035,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + application/json: {"stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: "422": application/json: {} @@ -3085,7 +3045,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": "", "prefix": false}], "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + application/json: {"stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "422": application/json: {} @@ -3279,6 +3239,7 @@ examples: page: 0 page_size: 100 created_by_me: false + order_by: "-created" responses: "200": application/json: {"object": "list", "total": 186589} @@ -3308,7 +3269,7 @@ examples: chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -3317,7 +3278,7 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "422": application/json: {} @@ -3335,7 +3296,7 @@ examples: application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"role": "assistant", "content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} stream_fim: speakeasy-default-stream-fim: requestBody: @@ -3354,14 +3315,14 @@ examples: application/json: {} userExample: requestBody: - application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}, "agent_id": ""} + application/json: {"stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? 
Answer in one short sentence."}], "response_format": {"type": "text"}, "agent_id": ""} responses: "200": - application/json: {"id": "cf79f7daaee244b1a0ae5c7b1444424a", "object": "chat.completion", "model": "mistral-medium-latest", "usage": {"prompt_tokens": 24, "completion_tokens": 27, "total_tokens": 51, "prompt_audio_seconds": {}}, "created": 1759500534, "choices": [{"index": 0, "message": {"content": "Arrr, the scallywag Claude Monet be the finest French painter to ever splash colors on a canvas, savvy?", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cf79f7daaee244b1a0ae5c7b1444424a", "object": "chat.completion", "model": "mistral-medium-latest", "usage": {"prompt_tokens": 24, "completion_tokens": 27, "total_tokens": 51, "prompt_audio_seconds": {}}, "created": 1759500534, "choices": [{"index": 0, "message": {"role": "assistant", "content": "Arrr, the scallywag Claude Monet be the finest French painter to ever splash colors on a canvas, savvy?", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} stream_agents: speakeasy-default-stream-agents: requestBody: - application/json: {"stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}, "agent_id": ""} + application/json: {"stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}, "agent_id": ""} responses: "422": application/json: {} @@ -3406,7 +3367,7 @@ examples: application/json: {} userExample: requestBody: - application/json: {"input": [{"content": "", "role": "tool"}], "model": "LeBaron"} + application/json: {"input": [{"role": "tool", "content": ""}], "model": "LeBaron"} responses: "200": application/json: {"id": "352bce1a55814127a3b0bc4fb8f02a35", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.0010322310263291001, "hate_and_discrimination": 0.001597845577634871, "violence_and_threats": 0.00020342698553577065, "dangerous_and_criminal_content": 0.0029810327105224133, "selfharm": 0.00017952796770259738, "health": 0.0002959570847451687, "financial": 0.000079673009167891, "law": 0.00004539786823443137, "pii": 0.004198795650154352}}]} @@ -3422,7 +3383,7 @@ examples: chat_classifications_v1_chat_classifications_post: speakeasy-default-chat-classifications-v1-chat-classifications-post: requestBody: - application/json: {"model": "Camry", "input": [{"messages": [{"content": "", "role": "system"}]}]} + application/json: {"model": "Camry", "input": [{"messages": [{"role": "system", "content": ""}]}]} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Altima", "results": [{}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}]} @@ -3439,7 +3400,7 @@ examples: application/json: {} userExample: requestBody: - application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + application/json: {"model": "CX-9", "document": 
{"type": "document_url", "document_url": "https://upset-labourer.net/"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} responses: "200": application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. 
Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. 
Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. (2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | (0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | 
(0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) | (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) | (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 
30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} @@ -3501,7 +3462,7 @@ examples: sort_order: "desc" responses: "200": - application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "processing_status": "", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "tokens_processing_total": 957230}]} + application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 957230}]} "422": application/json: {} libraries_documents_upload_v1: @@ -3513,7 +3474,7 @@ examples: multipart/form-data: {"file": "x-file: example.file"} responses: "200": - application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "processing_status": "", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "tokens_processing_total": 61161} + application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 61161} "422": application/json: {} libraries_documents_get_v1: @@ -3524,7 +3485,7 @@ 
examples: document_id: "90973aec-0508-4375-8b00-91d732414745" responses: "200": - application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "processing_status": "", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "tokens_processing_total": 793889} + application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 793889} "422": application/json: {} libraries_documents_update_v1: @@ -3537,7 +3498,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "processing_status": "", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "tokens_processing_total": 806683} + application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 806683} "422": application/json: {} libraries_documents_delete_v1: @@ -4328,6 +4289,16 @@ examples: responses: "200": application/json: {"id": "", "object": "batch", "input_files": ["7309e534-200e-43a4-83c5-dc4c2a14c745"], "endpoint": "", "errors": [], "status": "FAILED", "created_at": 157212, "total_requests": 188914, "completed_requests": 685483, "succeeded_requests": 127060, "failed_requests": 428561} + agents_api_v1_agents_delete_alias: + speakeasy-default-agents-api-v1-agents-delete-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + responses: + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} generatedFiles: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 23b915b5..733650dc 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,8 +13,9 @@ generation: requestResponseComponentNamesFeb2024: true securityFeb2025: true sharedErrorComponentsApr2025: true - methodSignaturesApr2024: true sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -22,6 +23,7 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" + versioningStrategy: automatic persistentEdits: enabled: "true" tests: @@ -29,26 +31,31 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a3 + version: 2.0.0-a3.1 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + main: {} allowedRedefinedBuiltins: - id - object + - input + - dir asyncMode: both authors: - Mistral baseErrorName: MistralError clientServerStatusCodesAsErrors: true - constFieldCasing: upper + constFieldCasing: normal defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. 
enableCustomCodeRegions: true enumFormat: union envVarPrefix: MISTRAL fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true responseRequiredSep2024: true flatAdditionalProperties: true flattenGlobalSecurity: true @@ -60,17 +67,17 @@ python: option: openapi paths: callbacks: "" - errors: "" + errors: errors operations: "" shared: "" webhooks: "" inferUnionDiscriminators: true inputModelSuffix: input license: "" - maxMethodParams: 15 + maxMethodParams: 999 methodArguments: infer-optional-args moduleName: mistralai.client - multipartArrayFormat: legacy + multipartArrayFormat: standard outputModelSuffix: output packageManager: uv packageName: mistralai @@ -80,3 +87,4 @@ python: responseFormat: flat sseFlatResponse: false templateVersion: v2 + useAsyncHooks: false diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 4aa0af42..d051080f 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.685.0 +speakeasyVersion: 1.729.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure @@ -14,8 +14,8 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 - sourceBlobDigest: sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 + sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 + sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 tags: - latest targets: @@ -25,24 +25,24 @@ targets: sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:a34c3049c604d0bb67101d042e959f14098964fe784f98975a9201c84dbf44d0 + codeSamplesRevisionDigest: sha256:248e5daaa44589805664ab1479502885758fde0f1da3b384b97b1a09d74c8256 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:fa36e5999e79c32e8b2c1317cc0d6ed179912ced15194f02b5f80da22e45ae5f + codeSamplesRevisionDigest: sha256:f6c4dc988e9b7be6f8d8087d14b2269be601bb9bff2227b07e1018efe88e1556 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 - sourceBlobDigest: sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 + sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 + sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:99fcae1bc81801e3825648a44f5ffa62a8f124e3186e5570be40414de164e7f2 + codeSamplesRevisionDigest: sha256:f3cf9d6d99a27d6e753bd6e1a2f2c2fb290f412a455576de4bab610ab4825939 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.685.0 + speakeasyVersion: 1.729.0 sources: mistral-azure-source: inputs: diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index ba109c09..65d6d202 100644 --- 
a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.685.0 +speakeasyVersion: 1.729.0 sources: mistral-azure-source: inputs: diff --git a/Makefile b/Makefile index a169d78f..bba024ad 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,29 @@ -.PHONY: help test-generate update-speakeasy-version +.PHONY: help generate test-generate update-speakeasy-version check-config help: @echo "Available targets:" + @echo " make generate Generate all SDKs (main, Azure, GCP)" @echo " make test-generate Test SDK generation locally" @echo " make update-speakeasy-version VERSION=x.y.z Update Speakeasy CLI version" + @echo " make check-config Check gen.yaml against recommended defaults" @echo "" @echo "Note: Production SDK generation is done via GitHub Actions:" @echo " .github/workflows/sdk_generation_mistralai_sdk.yaml" +# Generate all SDKs (main, Azure, GCP) +generate: + speakeasy run -t all + # Test SDK generation locally. # For production, use GitHub Actions: .github/workflows/sdk_generation_mistralai_sdk.yaml # This uses the Speakeasy CLI version defined in .speakeasy/workflow.yaml test-generate: speakeasy run --skip-versioning +# Check gen.yaml configuration against Speakeasy recommended defaults +check-config: + speakeasy configure generation check + # Update the Speakeasy CLI version (the code generator tool). # This modifies speakeasyVersion in .speakeasy/workflow.yaml and regenerates the SDK. # Usage: make update-speakeasy-version VERSION=1.685.0 diff --git a/README.md b/README.md index a774a9e1..dd98b5cc 100644 --- a/README.md +++ b/README.md @@ -27,9 +27,7 @@ $ source ~/.zshenv ## Summary -Mistral AI API: Dora OpenAPI schema - -Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. @@ -161,8 +159,8 @@ with Mistral( res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -190,8 +188,8 @@ async def main(): res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -269,8 +267,8 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", @@ -298,8 +296,8 @@ async def main(): res = await mistral.agents.complete_async(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", @@ -616,7 +614,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs="", stream=True, completion_args={ + res = mistral.beta.conversations.start_stream(inputs=[ + { + "object": "entry", + "type": "function.result", + "tool_call_id": "", + "result": "", + }, + ], stream=True, completion_args={ "response_format": { "type": "text", }, @@ -653,7 +658,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.upload(library_id="f973c54e-979a-4464-9d36-8cc31beb21fe", file={ + res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) @@ -680,8 +685,8 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.models.list( - retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) + res = mistral.models.list(, + RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) # Handle response print(res) @@ -711,7 +716,7 @@ with Mistral( ## Error Handling -[`MistralError`](./src/mistralai/client/models/mistralerror.py) is the base class for all HTTP error responses. It has the following properties: +[`MistralError`](./src/mistralai/client/errors/mistralerror.py) is the base class for all HTTP error responses. It has the following properties: | Property | Type | Description | | ------------------ | ---------------- | --------------------------------------------------------------------------------------- | @@ -724,8 +729,7 @@ with Mistral( ### Example ```python -import mistralai.client -from mistralai.client import Mistral, models +from mistralai.client import Mistral, errors import os @@ -741,7 +745,7 @@ with Mistral( print(res) - except models.MistralError as e: + except errors.MistralError as e: # The base class for HTTP error responses print(e.message) print(e.status_code) @@ -750,13 +754,13 @@ with Mistral( print(e.raw_response) # Depending on the method different errors may be thrown - if isinstance(e, models.HTTPValidationError): - print(e.data.detail) # Optional[List[mistralai.client.ValidationError]] + if isinstance(e, errors.HTTPValidationError): + print(e.data.detail) # Optional[List[models.ValidationError]] ``` ### Error Classes **Primary error:** -* [`MistralError`](./src/mistralai/client/models/mistralerror.py): The base class for HTTP error responses. +* [`MistralError`](./src/mistralai/client/errors/mistralerror.py): The base class for HTTP error responses.
Less common errors (6) @@ -768,9 +772,9 @@ with Mistral( * [`httpx.TimeoutException`](https://www.python-httpx.org/exceptions/#httpx.TimeoutException): HTTP request timed out. -**Inherit from [`MistralError`](./src/mistralai/client/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/client/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* -* [`ResponseValidationError`](./src/mistralai/client/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. +**Inherit from [`MistralError`](./src/mistralai/client/errors/mistralerror.py)**: +* [`HTTPValidationError`](./src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* +* [`ResponseValidationError`](./src/mistralai/client/errors/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute.
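As a minimal sketch (not part of the generated diff) of how the relocated error classes can be caught under the new `mistralai.client.errors` layout: the `models.list()` call, the import path, and the attribute names (`data.detail`, `cause`, `message`, `status_code`) mirror the README examples above; everything else is illustrative.

```python
import os

from mistralai.client import Mistral, errors

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    try:
        res = mistral.models.list()
        print(res)
    except errors.HTTPValidationError as e:
        # 422 responses: the parsed payload is available on `e.data`
        print(e.data.detail)
    except errors.ResponseValidationError as e:
        # Response did not match the expected Pydantic model; the underlying
        # pydantic validation error is exposed via the `cause` attribute
        print(e.cause)
    except errors.MistralError as e:
        # Base class for all other HTTP error responses
        print(e.message, e.status_code)
```

Since both `HTTPValidationError` and `ResponseValidationError` inherit from `MistralError`, the subclass handlers must appear before the base-class handler.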
diff --git a/USAGE.md b/USAGE.md index 18103864..f71bbabc 100644 --- a/USAGE.md +++ b/USAGE.md @@ -15,8 +15,8 @@ with Mistral( res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -44,8 +44,8 @@ async def main(): res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -123,8 +123,8 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", @@ -152,8 +152,8 @@ async def main(): res = await mistral.agents.complete_async(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", diff --git a/docs/models/httpvalidationerror.md b/docs/errors/httpvalidationerror.md similarity index 100% rename from docs/models/httpvalidationerror.md rename to docs/errors/httpvalidationerror.md diff --git a/docs/models/agent.md b/docs/models/agent.md index e335d889..4de5a901 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -13,7 +13,7 @@ | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["agent"]]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `version` | *int* | :heavy_check_mark: | N/A | | `versions` | List[*int*] | :heavy_check_mark: | N/A | diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md index a2d61731..451f6fb8 100644 --- a/docs/models/agentconversation.md +++ b/docs/models/agentconversation.md @@ -8,7 +8,7 @@ | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| -| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["conversation"]]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | diff --git a/docs/models/agentconversationobject.md b/docs/models/agentconversationobject.md deleted file mode 100644 index ea7cc75c..00000000 --- a/docs/models/agentconversationobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentConversationObject - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md deleted file mode 100644 index f0f0fdbc..00000000 --- a/docs/models/agentcreationrequest.md +++ /dev/null @@ -1,16 +0,0 @@ -# AgentCreationRequest - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTool](../models/agentcreationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentry.md b/docs/models/agenthandoffentry.md index 8831b0eb..2b689ec7 100644 --- a/docs/models/agenthandoffentry.md +++ b/docs/models/agenthandoffentry.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `object` | [Optional[models.AgentHandoffEntryObject]](../models/agenthandoffentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.AgentHandoffEntryType]](../models/agenthandoffentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | -| `next_agent_id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["agent.handoff"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentryobject.md b/docs/models/agenthandoffentryobject.md deleted file mode 100644 index 4bb876fb..00000000 --- a/docs/models/agenthandoffentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/agenthandoffentrytype.md 
b/docs/models/agenthandoffentrytype.md deleted file mode 100644 index 527ebceb..00000000 --- a/docs/models/agenthandoffentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `AGENT_HANDOFF` | agent.handoff | \ No newline at end of file diff --git a/docs/models/agentobject.md b/docs/models/agentobject.md deleted file mode 100644 index 70e143b0..00000000 --- a/docs/models/agentobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `AGENT` | agent | \ No newline at end of file diff --git a/docs/models/createorupdateagentaliasrequest.md b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md similarity index 90% rename from docs/models/createorupdateagentaliasrequest.md rename to docs/models/agentsapiv1agentscreateorupdatealiasrequest.md index af2591eb..79406434 100644 --- a/docs/models/createorupdateagentaliasrequest.md +++ b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md @@ -1,4 +1,4 @@ -# CreateOrUpdateAgentAliasRequest +# AgentsAPIV1AgentsCreateOrUpdateAliasRequest ## Fields diff --git a/docs/models/deleteagentaliasrequest.md b/docs/models/agentsapiv1agentsdeletealiasrequest.md similarity index 90% rename from docs/models/deleteagentaliasrequest.md rename to docs/models/agentsapiv1agentsdeletealiasrequest.md index 17812ec4..8e95c0c3 100644 --- a/docs/models/deleteagentaliasrequest.md +++ b/docs/models/agentsapiv1agentsdeletealiasrequest.md @@ -1,4 +1,4 @@ -# DeleteAgentAliasRequest +# AgentsAPIV1AgentsDeleteAliasRequest ## Fields diff --git a/docs/models/deleteagentrequest.md b/docs/models/agentsapiv1agentsdeleterequest.md similarity index 89% rename from docs/models/deleteagentrequest.md rename to docs/models/agentsapiv1agentsdeleterequest.md index 0aaacae4..2799f418 100644 --- a/docs/models/deleteagentrequest.md +++ b/docs/models/agentsapiv1agentsdeleterequest.md @@ -1,4 +1,4 @@ -# DeleteAgentRequest +# AgentsAPIV1AgentsDeleteRequest ## Fields diff --git a/docs/models/getagentagentversion.md b/docs/models/agentsapiv1agentsgetagentversion.md similarity index 79% rename from docs/models/getagentagentversion.md rename to docs/models/agentsapiv1agentsgetagentversion.md index 6d7b3f1d..7fb9f2d5 100644 --- a/docs/models/getagentagentversion.md +++ b/docs/models/agentsapiv1agentsgetagentversion.md @@ -1,4 +1,4 @@ -# GetAgentAgentVersion +# AgentsAPIV1AgentsGetAgentVersion ## Supported Types diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md new file mode 100644 index 00000000..ceffe009 --- /dev/null +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/getagentversionrequest.md 
b/docs/models/agentsapiv1agentsgetversionrequest.md similarity index 90% rename from docs/models/getagentversionrequest.md rename to docs/models/agentsapiv1agentsgetversionrequest.md index c98fee9d..96a73589 100644 --- a/docs/models/getagentversionrequest.md +++ b/docs/models/agentsapiv1agentsgetversionrequest.md @@ -1,4 +1,4 @@ -# GetAgentVersionRequest +# AgentsAPIV1AgentsGetVersionRequest ## Fields diff --git a/docs/models/listagentsrequest.md b/docs/models/agentsapiv1agentslistrequest.md similarity index 98% rename from docs/models/listagentsrequest.md rename to docs/models/agentsapiv1agentslistrequest.md index 79aec3ea..4785a54c 100644 --- a/docs/models/listagentsrequest.md +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -1,4 +1,4 @@ -# ListAgentsRequest +# AgentsAPIV1AgentsListRequest ## Fields diff --git a/docs/models/listagentaliasesrequest.md b/docs/models/agentsapiv1agentslistversionaliasesrequest.md similarity index 85% rename from docs/models/listagentaliasesrequest.md rename to docs/models/agentsapiv1agentslistversionaliasesrequest.md index b3570cb8..3083bf92 100644 --- a/docs/models/listagentaliasesrequest.md +++ b/docs/models/agentsapiv1agentslistversionaliasesrequest.md @@ -1,4 +1,4 @@ -# ListAgentAliasesRequest +# AgentsAPIV1AgentsListVersionAliasesRequest ## Fields diff --git a/docs/models/listagentversionsrequest.md b/docs/models/agentsapiv1agentslistversionsrequest.md similarity index 94% rename from docs/models/listagentversionsrequest.md rename to docs/models/agentsapiv1agentslistversionsrequest.md index ba8ddaa5..91831700 100644 --- a/docs/models/listagentversionsrequest.md +++ b/docs/models/agentsapiv1agentslistversionsrequest.md @@ -1,4 +1,4 @@ -# ListAgentVersionsRequest +# AgentsAPIV1AgentsListVersionsRequest ## Fields diff --git a/docs/models/agentsapiv1agentsupdaterequest.md b/docs/models/agentsapiv1agentsupdaterequest.md new file mode 100644 index 00000000..7ef60bec --- /dev/null +++ b/docs/models/agentsapiv1agentsupdaterequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsUpdateRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `update_agent_request` | [models.UpdateAgentRequest](../models/updateagentrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updateagentversionrequest.md b/docs/models/agentsapiv1agentsupdateversionrequest.md similarity index 89% rename from docs/models/updateagentversionrequest.md rename to docs/models/agentsapiv1agentsupdateversionrequest.md index b83eb867..e937acc9 100644 --- a/docs/models/updateagentversionrequest.md +++ b/docs/models/agentsapiv1agentsupdateversionrequest.md @@ -1,4 +1,4 @@ -# UpdateAgentVersionRequest +# AgentsAPIV1AgentsUpdateVersionRequest ## Fields diff --git a/docs/models/appendconversationrequest.md b/docs/models/agentsapiv1conversationsappendrequest.md similarity index 96% rename from docs/models/appendconversationrequest.md rename to docs/models/agentsapiv1conversationsappendrequest.md index 977d8e8b..ac8a00ec 100644 --- a/docs/models/appendconversationrequest.md +++ b/docs/models/agentsapiv1conversationsappendrequest.md @@ -1,4 +1,4 @@ -# AppendConversationRequest +# AgentsAPIV1ConversationsAppendRequest ## Fields diff --git 
a/docs/models/appendconversationstreamrequest.md b/docs/models/agentsapiv1conversationsappendstreamrequest.md similarity index 96% rename from docs/models/appendconversationstreamrequest.md rename to docs/models/agentsapiv1conversationsappendstreamrequest.md index a23231c2..dbc330f1 100644 --- a/docs/models/appendconversationstreamrequest.md +++ b/docs/models/agentsapiv1conversationsappendstreamrequest.md @@ -1,4 +1,4 @@ -# AppendConversationStreamRequest +# AgentsAPIV1ConversationsAppendStreamRequest ## Fields diff --git a/docs/models/getconversationrequest.md b/docs/models/agentsapiv1conversationsdeleterequest.md similarity index 95% rename from docs/models/getconversationrequest.md rename to docs/models/agentsapiv1conversationsdeleterequest.md index 8a66a8b0..c6eed281 100644 --- a/docs/models/getconversationrequest.md +++ b/docs/models/agentsapiv1conversationsdeleterequest.md @@ -1,4 +1,4 @@ -# GetConversationRequest +# AgentsAPIV1ConversationsDeleteRequest ## Fields diff --git a/docs/models/deleteconversationrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md similarity index 95% rename from docs/models/deleteconversationrequest.md rename to docs/models/agentsapiv1conversationsgetrequest.md index 39d9e5df..67d450c8 100644 --- a/docs/models/deleteconversationrequest.md +++ b/docs/models/agentsapiv1conversationsgetrequest.md @@ -1,4 +1,4 @@ -# DeleteConversationRequest +# AgentsAPIV1ConversationsGetRequest ## Fields diff --git a/docs/models/getconversationhistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md similarity index 94% rename from docs/models/getconversationhistoryrequest.md rename to docs/models/agentsapiv1conversationshistoryrequest.md index fc90282b..7e5d39e9 100644 --- a/docs/models/getconversationhistoryrequest.md +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -1,4 +1,4 @@ -# GetConversationHistoryRequest +# AgentsAPIV1ConversationsHistoryRequest ## Fields diff --git a/docs/models/listconversationsrequest.md b/docs/models/agentsapiv1conversationslistrequest.md similarity index 92% rename from docs/models/listconversationsrequest.md rename to docs/models/agentsapiv1conversationslistrequest.md index d99b4208..62c9011f 100644 --- a/docs/models/listconversationsrequest.md +++ b/docs/models/agentsapiv1conversationslistrequest.md @@ -1,4 +1,4 @@ -# ListConversationsRequest +# AgentsAPIV1ConversationsListRequest ## Fields diff --git a/docs/models/listconversationsresponse.md b/docs/models/agentsapiv1conversationslistresponse.md similarity index 84% rename from docs/models/listconversationsresponse.md rename to docs/models/agentsapiv1conversationslistresponse.md index 9d611c55..b233ee20 100644 --- a/docs/models/listconversationsresponse.md +++ b/docs/models/agentsapiv1conversationslistresponse.md @@ -1,4 +1,4 @@ -# ListConversationsResponse +# AgentsAPIV1ConversationsListResponse ## Supported Types diff --git a/docs/models/getconversationmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md similarity index 94% rename from docs/models/getconversationmessagesrequest.md rename to docs/models/agentsapiv1conversationsmessagesrequest.md index fd037fea..a91ab046 100644 --- a/docs/models/getconversationmessagesrequest.md +++ b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -1,4 +1,4 @@ -# GetConversationMessagesRequest +# AgentsAPIV1ConversationsMessagesRequest ## Fields diff --git a/docs/models/restartconversationrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md similarity index 96% rename 
from docs/models/restartconversationrequest.md rename to docs/models/agentsapiv1conversationsrestartrequest.md index f24f14e6..a18a41f5 100644 --- a/docs/models/restartconversationrequest.md +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -1,4 +1,4 @@ -# RestartConversationRequest +# AgentsAPIV1ConversationsRestartRequest ## Fields diff --git a/docs/models/restartconversationstreamrequest.md b/docs/models/agentsapiv1conversationsrestartstreamrequest.md similarity index 96% rename from docs/models/restartconversationstreamrequest.md rename to docs/models/agentsapiv1conversationsrestartstreamrequest.md index daa661a9..7548286a 100644 --- a/docs/models/restartconversationstreamrequest.md +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -1,4 +1,4 @@ -# RestartConversationStreamRequest +# AgentsAPIV1ConversationsRestartStreamRequest ## Fields diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index d87dc7da..33435732 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -11,7 +11,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.AgentsCompletionRequestMessage](../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index dd1804a1..407be8e0 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -11,7 +11,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.AgentsCompletionStreamRequestMessage](../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/docs/models/unarchiveftmodelout.md b/docs/models/archivemodelresponse.md similarity index 96% rename from docs/models/unarchiveftmodelout.md rename to docs/models/archivemodelresponse.md index 12c3d745..276656d1 100644 --- a/docs/models/unarchiveftmodelout.md +++ b/docs/models/archivemodelresponse.md @@ -1,4 +1,4 @@ -# UnarchiveFTModelOut +# ArchiveModelResponse ## Fields diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md index 3d0bd90b..9ef63837 100644 --- a/docs/models/assistantmessage.md +++ b/docs/models/assistantmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | | `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/docs/models/assistantmessagerole.md b/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/audiochunk.md b/docs/models/audiochunk.md index 8a04af04..1ba8b0f5 100644 --- a/docs/models/audiochunk.md +++ b/docs/models/audiochunk.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ------------------------ | ------------------------ | ------------------------ | ------------------------ | -| `input_audio` | *str* | :heavy_check_mark: | N/A | -| `type` | *Literal["input_audio"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| `type` | *Literal["input_audio"]* | :heavy_check_mark: | N/A | +| `input_audio` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md index d7f5bd51..80bd5301 100644 --- a/docs/models/audiotranscriptionrequest.md +++ b/docs/models/audiotranscriptionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | voxtral-mini-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | **Example 1:** voxtral-mini-latest
**Example 2:** voxtral-mini-2507 | | `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | | | `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | diff --git a/docs/models/batchjobout.md b/docs/models/batchjob.md similarity index 99% rename from docs/models/batchjobout.md rename to docs/models/batchjob.md index 5f101173..162e2cff 100644 --- a/docs/models/batchjobout.md +++ b/docs/models/batchjob.md @@ -1,4 +1,4 @@ -# BatchJobOut +# BatchJob ## Fields diff --git a/docs/models/batchjobsout.md b/docs/models/batchjobsout.md deleted file mode 100644 index 7a9d6f68..00000000 --- a/docs/models/batchjobsout.md +++ /dev/null @@ -1,10 +0,0 @@ -# BatchJobsOut - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | -| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/cancelfinetuningjobresponse.md b/docs/models/cancelfinetuningjobresponse.md deleted file mode 100644 index c512342e..00000000 --- a/docs/models/cancelfinetuningjobresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# CancelFineTuningJobResponse - -OK - - -## Supported Types - -### `models.ClassifierDetailedJobOut` - -```python -value: models.ClassifierDetailedJobOut = /* values here */ -``` - -### `models.CompletionDetailedJobOut` - -```python -value: models.CompletionDetailedJobOut = /* values here */ -``` - diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md index 910d62ae..ba9c95ea 100644 --- a/docs/models/chatclassificationrequest.md +++ b/docs/models/chatclassificationrequest.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | | `model` | *str* | :heavy_check_mark: | N/A | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file +| `input` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index f3abeeff..921161fa 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 42792d39..8761f000 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/docs/models/checkpointout.md b/docs/models/checkpoint.md similarity index 96% rename from docs/models/checkpointout.md rename to docs/models/checkpoint.md index 053592d2..f7f35530 100644 --- a/docs/models/checkpointout.md +++ b/docs/models/checkpoint.md @@ -1,10 +1,10 @@ -# CheckpointOut +# Checkpoint ## Fields | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `metrics` | [models.MetricOut](../models/metricout.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | +| `metrics` | [models.Metric](../models/metric.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | | `step_number` | *int* | :heavy_check_mark: | The step number that the checkpoint was created at. | | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the checkpoint was created. 
| 1716963433 | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutintegration.md b/docs/models/classifierdetailedjoboutintegration.md deleted file mode 100644 index 9dfa6e8a..00000000 --- a/docs/models/classifierdetailedjoboutintegration.md +++ /dev/null @@ -1,11 +0,0 @@ -# ClassifierDetailedJobOutIntegration - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/classifierfinetunedmodel.md b/docs/models/classifierfinetunedmodel.md new file mode 100644 index 00000000..ad05f931 --- /dev/null +++ b/docs/models/classifierfinetunedmodel.md @@ -0,0 +1,23 @@ +# ClassifierFineTunedModel + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A | +| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierjobout.md b/docs/models/classifierfinetuningjob.md similarity index 97% rename from docs/models/classifierjobout.md rename to docs/models/classifierfinetuningjob.md index ceecef5d..369756ba 100644 --- a/docs/models/classifierjobout.md +++ b/docs/models/classifierfinetuningjob.md @@ -1,4 +1,4 @@ -# ClassifierJobOut +# ClassifierFineTuningJob ## Fields @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `status` | [models.ClassifierJobOutStatus](../models/classifierjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `status` | [models.ClassifierFineTuningJobStatus](../models/classifierfinetuningjobstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. 
| @@ -16,8 +16,8 @@ | `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.ClassifierJobOutIntegration](../models/classifierjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.ClassifierFineTuningJobIntegration](../models/classifierfinetuningjobintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | | `job_type` | *Literal["classifier"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). | | `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierdetailedjobout.md b/docs/models/classifierfinetuningjobdetails.md similarity index 94% rename from docs/models/classifierdetailedjobout.md rename to docs/models/classifierfinetuningjobdetails.md index fb532449..c5efdf1c 100644 --- a/docs/models/classifierdetailedjobout.md +++ b/docs/models/classifierfinetuningjobdetails.md @@ -1,4 +1,4 @@ -# ClassifierDetailedJobOut +# ClassifierFineTuningJobDetails ## Fields @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `status` | [models.ClassifierDetailedJobOutStatus](../models/classifierdetailedjoboutstatus.md) | :heavy_check_mark: | N/A | +| `status` | [models.ClassifierFineTuningJobDetailsStatus](../models/classifierfinetuningjobdetailsstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | @@ -16,11 +16,11 @@ | `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.ClassifierDetailedJobOutIntegration](../models/classifierdetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.ClassifierFineTuningJobDetailsIntegration](../models/classifierfinetuningjobdetailsintegration.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | | `job_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | | 
`hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | -| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | -| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| `events` | List[[models.Event](../models/event.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | +| `checkpoints` | List[[models.Checkpoint](../models/checkpoint.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetuningjobdetailsintegration.md b/docs/models/classifierfinetuningjobdetailsintegration.md new file mode 100644 index 00000000..438a35d9 --- /dev/null +++ b/docs/models/classifierfinetuningjobdetailsintegration.md @@ -0,0 +1,11 @@ +# ClassifierFineTuningJobDetailsIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/classifierdetailedjoboutstatus.md b/docs/models/classifierfinetuningjobdetailsstatus.md similarity index 94% rename from docs/models/classifierdetailedjoboutstatus.md rename to docs/models/classifierfinetuningjobdetailsstatus.md index c3118aaf..058c6583 100644 --- a/docs/models/classifierdetailedjoboutstatus.md +++ b/docs/models/classifierfinetuningjobdetailsstatus.md @@ -1,4 +1,4 @@ -# ClassifierDetailedJobOutStatus +# ClassifierFineTuningJobDetailsStatus ## Values diff --git a/docs/models/classifierfinetuningjobintegration.md b/docs/models/classifierfinetuningjobintegration.md new file mode 100644 index 00000000..820aee4c --- /dev/null +++ b/docs/models/classifierfinetuningjobintegration.md @@ -0,0 +1,11 @@ +# ClassifierFineTuningJobIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionjoboutstatus.md b/docs/models/classifierfinetuningjobstatus.md similarity index 95% rename from docs/models/completionjoboutstatus.md rename to docs/models/classifierfinetuningjobstatus.md index 91754945..ca829885 100644 --- a/docs/models/completionjoboutstatus.md +++ b/docs/models/classifierfinetuningjobstatus.md @@ -1,4 +1,4 @@ -# CompletionJobOutStatus +# ClassifierFineTuningJobStatus The current status of the fine-tuning job. 
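Note: the `*Out`-suffixed classifier fine-tuning models above are renamed in this release, which is breaking for code that references the old generated names. A minimal migration sketch, assuming the new classes are exported from `mistralai.models` exactly as the regenerated tables show; the helper functions and the old identifiers in the comments are illustrative only:

```python
from mistralai import models

# Pre-upgrade names (removed by this release): ClassifierJobOut,
# ClassifierDetailedJobOut, ClassifierTargetOut, JobMetadataOut,
# EventOut, CheckpointOut.

def describe_job(job: models.ClassifierFineTuningJob) -> str:
    # `status` is now typed as models.ClassifierFineTuningJobStatus.
    return f"{job.id}: {job.status}"

def latest_checkpoint(details: models.ClassifierFineTuningJobDetails):
    # `checkpoints` entries are now models.Checkpoint (previously CheckpointOut);
    # the field is optional, so guard against it being unset or empty.
    return details.checkpoints[-1] if details.checkpoints else None
```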
diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md deleted file mode 100644 index 6e7afbbe..00000000 --- a/docs/models/classifierftmodelout.md +++ /dev/null @@ -1,23 +0,0 @@ -# ClassifierFTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | -| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierjoboutintegration.md b/docs/models/classifierjoboutintegration.md deleted file mode 100644 index 33af8a70..00000000 --- a/docs/models/classifierjoboutintegration.md +++ /dev/null @@ -1,11 +0,0 @@ -# ClassifierJobOutIntegration - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/classifiertargetin.md b/docs/models/classifiertarget.md similarity index 99% rename from docs/models/classifiertargetin.md rename to docs/models/classifiertarget.md index 78cab67b..f8c99e2e 100644 --- a/docs/models/classifiertargetin.md +++ b/docs/models/classifiertarget.md @@ -1,4 +1,4 @@ -# ClassifierTargetIn +# ClassifierTarget ## Fields diff --git a/docs/models/classifiertargetout.md b/docs/models/classifiertargetresult.md similarity index 98% rename from docs/models/classifiertargetout.md rename to docs/models/classifiertargetresult.md index 57535ae5..ccadc623 100644 --- a/docs/models/classifiertargetout.md +++ b/docs/models/classifiertargetresult.md @@ -1,4 +1,4 @@ -# ClassifierTargetOut +# ClassifierTargetResult ## Fields diff --git a/docs/models/classifiertrainingparametersin.md b/docs/models/classifiertrainingparametersin.md deleted file mode 100644 index 1287c973..00000000 --- a/docs/models/classifiertrainingparametersin.md +++ /dev/null @@ -1,15 +0,0 @@ -# ClassifierTrainingParametersIn - -The fine-tuning hyperparameter settings used in a classifier fine-tune job. 
- - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | -| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | -| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | -| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | -| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/codeinterpretertool.md b/docs/models/codeinterpretertool.md index 544cda93..6302fc62 100644 --- a/docs/models/codeinterpretertool.md +++ b/docs/models/codeinterpretertool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | -| `type` | *Literal["code_interpreter"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["code_interpreter"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md index 60d09137..148f7608 100644 --- a/docs/models/completionargs.md +++ b/docs/models/completionargs.md @@ -5,15 +5,15 @@ White-listed arguments from the completion API ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | -| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | -| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | {
"type": "text"
} | -| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutintegration.md b/docs/models/completiondetailedjoboutintegration.md deleted file mode 100644 index 9e526053..00000000 --- a/docs/models/completiondetailedjoboutintegration.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionDetailedJobOutIntegration - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/completiondetailedjoboutrepository.md b/docs/models/completiondetailedjoboutrepository.md deleted file mode 100644 index 92a7b75c..00000000 --- a/docs/models/completiondetailedjoboutrepository.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionDetailedJobOutRepository - - -## Supported Types - -### `models.GithubRepositoryOut` - -```python -value: models.GithubRepositoryOut = /* values here */ -``` - diff --git a/docs/models/completionfinetunedmodel.md b/docs/models/completionfinetunedmodel.md new file mode 100644 index 00000000..0055db02 --- /dev/null +++ b/docs/models/completionfinetunedmodel.md @@ -0,0 +1,22 @@ +# CompletionFineTunedModel + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionjobout.md b/docs/models/completionfinetuningjob.md similarity index 97% rename from docs/models/completionjobout.md rename to docs/models/completionfinetuningjob.md index 5eb44eef..83c0ae7e 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionfinetuningjob.md @@ -1,4 +1,4 @@ -# CompletionJobOut +# CompletionFineTuningJob ## Fields @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `status` | [models.CompletionJobOutStatus](../models/completionjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `status` | [models.CompletionFineTuningJobStatus](../models/completionfinetuningjobstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. 
| | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | @@ -16,9 +16,9 @@ | `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.CompletionJobOutIntegration](../models/completionjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.CompletionFineTuningJobIntegration](../models/completionfinetuningjobintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | | `job_type` | *Literal["completion"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.CompletionJobOutRepository](../models/completionjoboutrepository.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `repositories` | List[[models.CompletionFineTuningJobRepository](../models/completionfinetuningjobrepository.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completionfinetuningjobdetails.md similarity index 94% rename from docs/models/completiondetailedjobout.md rename to docs/models/completionfinetuningjobdetails.md index bc7e5d1c..3c54e874 100644 --- a/docs/models/completiondetailedjobout.md +++ b/docs/models/completionfinetuningjobdetails.md @@ -1,4 +1,4 @@ -# CompletionDetailedJobOut +# CompletionFineTuningJobDetails ## Fields @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `status` | [models.CompletionDetailedJobOutStatus](../models/completiondetailedjoboutstatus.md) | :heavy_check_mark: | N/A | +| `status` | [models.CompletionFineTuningJobDetailsStatus](../models/completionfinetuningjobdetailsstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | @@ -16,11 +16,11 @@ | `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.CompletionDetailedJobOutIntegration](../models/completiondetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.CompletionFineTuningJobDetailsIntegration](../models/completionfinetuningjobdetailsintegration.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | | `job_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.CompletionDetailedJobOutRepository](../models/completiondetailedjoboutrepository.md)] | :heavy_minus_sign: | N/A | -| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | -| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `repositories` | List[[models.CompletionFineTuningJobDetailsRepository](../models/completionfinetuningjobdetailsrepository.md)] | :heavy_minus_sign: | N/A | +| `events` | List[[models.Event](../models/event.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| +| `checkpoints` | List[[models.Checkpoint](../models/checkpoint.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetuningjobdetailsintegration.md b/docs/models/completionfinetuningjobdetailsintegration.md new file mode 100644 index 00000000..38f6a349 --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsintegration.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobDetailsIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobdetailsrepository.md b/docs/models/completionfinetuningjobdetailsrepository.md new file mode 100644 index 00000000..c6bd67cd --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsrepository.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobDetailsRepository + + +## Supported Types + +### `models.GithubRepository` + +```python +value: models.GithubRepository = /* values here */ +``` + diff --git a/docs/models/completiondetailedjoboutstatus.md b/docs/models/completionfinetuningjobdetailsstatus.md similarity index 94% rename from docs/models/completiondetailedjoboutstatus.md rename to docs/models/completionfinetuningjobdetailsstatus.md index b80525ba..94d795a9 100644 --- a/docs/models/completiondetailedjoboutstatus.md +++ b/docs/models/completionfinetuningjobdetailsstatus.md @@ -1,4 +1,4 @@ -# CompletionDetailedJobOutStatus +# CompletionFineTuningJobDetailsStatus ## Values diff --git a/docs/models/completionfinetuningjobintegration.md b/docs/models/completionfinetuningjobintegration.md new file mode 100644 index 00000000..dbe57417 --- /dev/null +++ b/docs/models/completionfinetuningjobintegration.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobrepository.md b/docs/models/completionfinetuningjobrepository.md new file mode 100644 index 00000000..54225e27 --- /dev/null +++ b/docs/models/completionfinetuningjobrepository.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobRepository + + +## Supported Types + +### `models.GithubRepository` + +```python +value: models.GithubRepository = /* values here */ +``` + diff --git a/docs/models/classifierjoboutstatus.md b/docs/models/completionfinetuningjobstatus.md similarity index 95% rename from docs/models/classifierjoboutstatus.md rename to docs/models/completionfinetuningjobstatus.md index 4520f164..db151a1b 100644 --- a/docs/models/classifierjoboutstatus.md +++ b/docs/models/completionfinetuningjobstatus.md @@ -1,4 +1,4 @@ -# ClassifierJobOutStatus +# CompletionFineTuningJobStatus The current status of the fine-tuning job. 
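The regenerated `CompletionArgs` table above now documents three `response_format` examples (`text`, `json_object`, and a strict `json_schema`). A minimal sketch of the structured-output case on a conversation start, assuming the generated SDK accepts plain dicts for nested request models (as its TypedDict-style inputs typically do); the API key, agent ID, and schema below are placeholders, not values from this changeset:

```python
from mistralai import Mistral

# Illustrative JSON schema mirroring "Example 3" from the CompletionArgs docs.
book_response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "book",
        "strict": True,
        "schema": {
            "title": "Book",
            "type": "object",
            "additionalProperties": False,
            "properties": {
                "name": {"title": "Name", "type": "string"},
                "authors": {"items": {"type": "string"}, "title": "Authors", "type": "array"},
            },
            "required": ["name", "authors"],
        },
    },
}

with Mistral(api_key="YOUR_API_KEY") as mistral:  # placeholder key
    res = mistral.beta.conversations.start(
        agent_id="ag_0123456789",  # hypothetical agent ID
        inputs="Who wrote Le Petit Prince? Reply as a Book object.",
        completion_args={"response_format": book_response_format},  # assumes dict input is accepted
    )
    print(res.outputs)
```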
diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md deleted file mode 100644 index ccd4844f..00000000 --- a/docs/models/completionftmodelout.md +++ /dev/null @@ -1,22 +0,0 @@ -# CompletionFTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionjoboutintegration.md b/docs/models/completionjoboutintegration.md deleted file mode 100644 index 6474747b..00000000 --- a/docs/models/completionjoboutintegration.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionJobOutIntegration - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/completionjoboutrepository.md b/docs/models/completionjoboutrepository.md deleted file mode 100644 index 52f65558..00000000 --- a/docs/models/completionjoboutrepository.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionJobOutRepository - - -## Supported Types - -### `models.GithubRepositoryOut` - -```python -value: models.GithubRepositoryOut = /* values here */ -``` - diff --git a/docs/models/completiontrainingparametersin.md b/docs/models/completiontrainingparametersin.md deleted file mode 100644 index 9fcc714e..00000000 --- a/docs/models/completiontrainingparametersin.md +++ /dev/null @@ -1,16 +0,0 @@ -# CompletionTrainingParametersIn - -The fine-tuning hyperparameter settings used in a fine-tune job. 
- - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | -| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | -| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | -| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | -| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/confirmation.md b/docs/models/confirmation.md new file mode 100644 index 00000000..fd6e6aaa --- /dev/null +++ b/docs/models/confirmation.md @@ -0,0 +1,9 @@ +# Confirmation + + +## Values + +| Name | Value | +| ------- | ------- | +| `ALLOW` | allow | +| `DENY` | deny | \ No newline at end of file diff --git a/docs/models/conversationappendrequest.md b/docs/models/conversationappendrequest.md index 1cdb584b..78a96508 100644 --- a/docs/models/conversationappendrequest.md +++ b/docs/models/conversationappendrequest.md @@ -5,8 +5,9 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| | `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequest.md b/docs/models/conversationappendstreamrequest.md index a8516ea7..daea9c52 100644 --- a/docs/models/conversationappendstreamrequest.md +++ b/docs/models/conversationappendstreamrequest.md @@ -5,8 +5,9 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md index c8baad0b..daefe336 100644 --- a/docs/models/conversationhistory.md +++ b/docs/models/conversationhistory.md @@ -5,8 +5,8 @@ Retrieve all entries in a conversation. 
## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `entries` | List[[models.Entry](../models/entry.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | +| `object` | *Optional[Literal["conversation.history"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `entries` | List[[models.Entry](../models/entry.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistoryobject.md b/docs/models/conversationhistoryobject.md deleted file mode 100644 index a14e7f9c..00000000 --- a/docs/models/conversationhistoryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationHistoryObject - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `CONVERSATION_HISTORY` | conversation.history | \ No newline at end of file diff --git a/docs/models/conversationmessages.md b/docs/models/conversationmessages.md index c3f00979..8fa51571 100644 --- a/docs/models/conversationmessages.md +++ b/docs/models/conversationmessages.md @@ -5,8 +5,8 @@ Similar to the conversation history but only keep the messages ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `object` | [Optional[models.ConversationMessagesObject]](../models/conversationmessagesobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `object` | *Optional[Literal["conversation.messages"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationmessagesobject.md b/docs/models/conversationmessagesobject.md deleted file mode 100644 index db3a441b..00000000 --- a/docs/models/conversationmessagesobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationMessagesObject - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| 
`CONVERSATION_MESSAGES` | conversation.messages | \ No newline at end of file diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md index e3182128..2732f785 100644 --- a/docs/models/conversationresponse.md +++ b/docs/models/conversationresponse.md @@ -5,9 +5,9 @@ The response after appending new entries to the conversation. ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `object` | *Optional[Literal["conversation.response"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationresponseobject.md b/docs/models/conversationresponseobject.md deleted file mode 100644 index bea66e52..00000000 --- a/docs/models/conversationresponseobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationResponseObject - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `CONVERSATION_RESPONSE` | conversation.response | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index d9865312..ad3ff362 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -7,7 +7,7 @@ Request to restart a new conversation from a given entry in the conversation. | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index a5f8cbe7..865a1e8f 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -7,7 +7,7 @@ Request to restart a new conversation from a given entry in the conversation. | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | diff --git a/docs/models/conversationthinkchunk.md b/docs/models/conversationthinkchunk.md new file mode 100644 index 00000000..1fb16bd9 --- /dev/null +++ b/docs/models/conversationthinkchunk.md @@ -0,0 +1,10 @@ +# ConversationThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `type` | *Optional[Literal["thinking"]]* | :heavy_minus_sign: | N/A | +| `thinking` | List[[models.ConversationThinkChunkThinking](../models/conversationthinkchunkthinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationthinkchunkthinking.md b/docs/models/conversationthinkchunkthinking.md new file mode 100644 index 00000000..84b80018 --- /dev/null +++ b/docs/models/conversationthinkchunkthinking.md @@ -0,0 +1,17 @@ +# ConversationThinkChunkThinking + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/agentupdaterequest.md b/docs/models/createagentrequest.md similarity index 80% rename from docs/models/agentupdaterequest.md rename to docs/models/createagentrequest.md index b1830d7b..cca3a079 100644 --- a/docs/models/agentupdaterequest.md +++ b/docs/models/createagentrequest.md @@ -1,4 +1,4 @@ -# AgentUpdateRequest +# CreateAgentRequest ## Fields @@ -6,12 +6,11 @@ | Field | Type | Required | 
Description | | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentUpdateRequestTool](../models/agentupdaterequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.CreateAgentRequestTool](../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentupdaterequesttool.md b/docs/models/createagentrequesttool.md similarity index 96% rename from docs/models/agentupdaterequesttool.md rename to docs/models/createagentrequesttool.md index ce553126..c6ed3e98 100644 --- a/docs/models/agentupdaterequesttool.md +++ b/docs/models/createagentrequesttool.md @@ -1,4 +1,4 @@ -# AgentUpdateRequestTool +# CreateAgentRequestTool ## Supported Types diff --git a/docs/models/batchjobin.md b/docs/models/createbatchjobrequest.md similarity index 99% rename from docs/models/batchjobin.md rename to docs/models/createbatchjobrequest.md index 7dcf265d..d094e2d5 100644 --- a/docs/models/batchjobin.md +++ b/docs/models/createbatchjobrequest.md @@ -1,4 +1,4 @@ -# BatchJobIn +# CreateBatchJobRequest ## Fields @@ -8,7 +8,7 @@ | `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | | `requests` | List[[models.BatchRequest](../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | | `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | **Example 1:** mistral-small-latest
**Example 2:** mistral-medium-latest | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | | `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/createfileresponse.md similarity index 99% rename from docs/models/uploadfileout.md rename to docs/models/createfileresponse.md index 6f09c9a6..8152922b 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/createfileresponse.md @@ -1,4 +1,4 @@ -# UploadFileOut +# CreateFileResponse ## Fields diff --git a/docs/models/jobin.md b/docs/models/createfinetuningjobrequest.md similarity index 97% rename from docs/models/jobin.md rename to docs/models/createfinetuningjobrequest.md index 62da9072..a93e323d 100644 --- a/docs/models/jobin.md +++ b/docs/models/createfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# JobIn +# CreateFineTuningJobRequest ## Fields @@ -9,10 +9,10 @@ | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegration](../models/jobinintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `integrations` | List[[models.CreateFineTuningJobRequestIntegration](../models/createfinetuningjobrequestintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | | `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.JobInRepository](../models/jobinrepository.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetIn](../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `repositories` | List[[models.CreateFineTuningJobRequestRepository](../models/createfinetuningjobrequestrepository.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTarget](../models/classifiertarget.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobinintegration.md b/docs/models/createfinetuningjobrequestintegration.md similarity index 74% rename from docs/models/jobinintegration.md rename to docs/models/createfinetuningjobrequestintegration.md index 103820e7..0054a4a6 100644 --- a/docs/models/jobinintegration.md +++ b/docs/models/createfinetuningjobrequestintegration.md @@ -1,4 +1,4 @@ -# JobInIntegration +# CreateFineTuningJobRequestIntegration ## Supported Types diff --git a/docs/models/jobinrepository.md b/docs/models/createfinetuningjobrequestrepository.md similarity index 75% rename from docs/models/jobinrepository.md rename to docs/models/createfinetuningjobrequestrepository.md index e873ae63..32be1b6d 100644 --- a/docs/models/jobinrepository.md +++ b/docs/models/createfinetuningjobrequestrepository.md @@ -1,4 +1,4 @@ -# JobInRepository +# CreateFineTuningJobRequestRepository ## Supported Types diff --git a/docs/models/createfinetuningjobresponse.md b/docs/models/createfinetuningjobresponse.md deleted file mode 100644 index f82cd793..00000000 --- a/docs/models/createfinetuningjobresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# CreateFineTuningJobResponse - -OK - - -## Supported Types - -### `models.Response` - -```python -value: models.Response = /* values here */ -``` - -### `models.LegacyJobMetadataOut` - -```python -value: models.LegacyJobMetadataOut = /* values here */ -``` - diff --git a/docs/models/libraryin.md b/docs/models/createlibraryrequest.md similarity index 95% rename from docs/models/libraryin.md rename to docs/models/createlibraryrequest.md index d6b11914..71562806 100644 --- a/docs/models/libraryin.md +++ b/docs/models/createlibraryrequest.md @@ -1,4 +1,4 @@ -# LibraryIn +# CreateLibraryRequest ## Fields diff --git a/docs/models/deletefileout.md b/docs/models/deletefileresponse.md similarity index 97% rename from docs/models/deletefileout.md rename to docs/models/deletefileresponse.md index 4709cc49..188e2504 100644 --- a/docs/models/deletefileout.md +++ b/docs/models/deletefileresponse.md @@ -1,4 +1,4 @@ -# DeleteFileOut +# DeleteFileResponse ## Fields diff --git a/docs/models/deletemodelrequest.md b/docs/models/deletemodelv1modelsmodeliddeleterequest.md similarity index 94% rename from docs/models/deletemodelrequest.md rename to docs/models/deletemodelv1modelsmodeliddeleterequest.md index d80103f1..d9bc15fe 100644 --- a/docs/models/deletemodelrequest.md +++ b/docs/models/deletemodelv1modelsmodeliddeleterequest.md @@ -1,4 +1,4 @@ -# DeleteModelRequest +# DeleteModelV1ModelsModelIDDeleteRequest ## Fields diff --git a/docs/models/document.md b/docs/models/document.md index 509d43b7..284babb9 100644 --- 
diff --git a/docs/models/document.md b/docs/models/document.md
index 509d43b7..284babb9 100644
--- a/docs/models/document.md
+++ b/docs/models/document.md
@@ -1,25 +1,26 @@
 # Document
 
-Document to run OCR on
-
-
-## Supported Types
-
-### `models.FileChunk`
-
-```python
-value: models.FileChunk = /* values here */
-```
-
-### `models.DocumentURLChunk`
-
-```python
-value: models.DocumentURLChunk = /* values here */
-```
-
-### `models.ImageURLChunk`
-
-```python
-value: models.ImageURLChunk = /* values here */
-```
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `id` | *str* | :heavy_check_mark: | N/A |
+| `library_id` | *str* | :heavy_check_mark: | N/A |
+| `hash` | *Nullable[str]* | :heavy_check_mark: | N/A |
+| `mime_type` | *Nullable[str]* | :heavy_check_mark: | N/A |
+| `extension` | *Nullable[str]* | :heavy_check_mark: | N/A |
+| `size` | *Nullable[int]* | :heavy_check_mark: | N/A |
+| `name` | *str* | :heavy_check_mark: | N/A |
+| `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A |
+| `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A |
+| `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A |
+| `uploaded_by_type` | *str* | :heavy_check_mark: | N/A |
+| `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A |
+| `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A |
+| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `attributes` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
+| `processing_status` | *str* | :heavy_check_mark: | N/A |
+| `tokens_processing_total` | *int* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md
index 1695bad4..95c3fa52 100644
--- a/docs/models/documentlibrarytool.md
+++ b/docs/models/documentlibrarytool.md
@@ -3,7 +3,8 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | *Literal["document_library"]* | :heavy_check_mark: | N/A |
-| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A |
+| `type` | *Literal["document_library"]* | :heavy_check_mark: | N/A |
+| `library_ids` | List[*str*] | :heavy_check_mark: | IDs of the libraries in which to search. |
\ No newline at end of file
diff --git a/docs/models/documentout.md b/docs/models/documentout.md
deleted file mode 100644
index 28df11eb..00000000
--- a/docs/models/documentout.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# DocumentOut
-
-
-## Fields
-
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `id` | *str* | :heavy_check_mark: | N/A |
-| `library_id` | *str* | :heavy_check_mark: | N/A |
-| `hash` | *Nullable[str]* | :heavy_check_mark: | N/A |
-| `mime_type` | *Nullable[str]* | :heavy_check_mark: | N/A |
-| `extension` | *Nullable[str]* | :heavy_check_mark: | N/A |
-| `size` | *Nullable[int]* | :heavy_check_mark: | N/A |
-| `name` | *str* | :heavy_check_mark: | N/A |
-| `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A |
-| `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A |
-| `processing_status` | *str* | :heavy_check_mark: | N/A |
-| `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A |
-| `uploaded_by_type` | *str* | :heavy_check_mark: | N/A |
-| `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A |
-| `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A |
-| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `attributes` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
-| `tokens_processing_total` | *int* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/documentunion.md b/docs/models/documentunion.md
new file mode 100644
index 00000000..e573bd46
--- /dev/null
+++ b/docs/models/documentunion.md
@@ -0,0 +1,25 @@
+# DocumentUnion
+
+Document to run OCR on
+
+
+## Supported Types
+
+### `models.FileChunk`
+
+```python
+value: models.FileChunk = /* values here */
+```
+
+### `models.DocumentURLChunk`
+
+```python
+value: models.DocumentURLChunk = /* values here */
+```
+
+### `models.ImageURLChunk`
+
+```python
+value: models.ImageURLChunk = /* values here */
+```
+
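A minimal sketch of running OCR with the `DocumentUnion` variants documented above, using a `DocumentURLChunk`-shaped dict. The `ocr.process` call and the `mistral-ocr-latest` model name follow the SDK's current shape and are assumptions here; the URL is a placeholder.

```python
from mistralai import Mistral

client = Mistral(api_key="<your-api-key>")  # placeholder key

# `document` accepts any DocumentUnion member; here, a document_url chunk.
resp = client.ocr.process(
    model="mistral-ocr-latest",
    document={"type": "document_url", "document_url": "https://example.com/sample.pdf"},
)
for page in resp.pages:
    print(page.markdown)
```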
diff --git a/docs/models/documentupdatein.md b/docs/models/documentupdatein.md
deleted file mode 100644
index 0993886d..00000000
--- a/docs/models/documentupdatein.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# DocumentUpdateIn
-
-
-## Fields
-
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `attributes` | Dict[str, [models.Attributes](../models/attributes.md)] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md
index 6c9a5b4d..9dbfbe50 100644
--- a/docs/models/documenturlchunk.md
+++ b/docs/models/documenturlchunk.md
@@ -3,8 +3,8 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `document_url` | *str* | :heavy_check_mark: | N/A |
-| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document |
-| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A |
+| `document_url` | *str* | :heavy_check_mark: | N/A |
+| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document |
\ No newline at end of file
diff --git a/docs/models/documenturlchunktype.md b/docs/models/documenturlchunktype.md
deleted file mode 100644
index 32e1fa9e..00000000
--- a/docs/models/documenturlchunktype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# DocumentURLChunkType
-
-
-## Values
-
-| Name | Value |
-| --- | --- |
-| `DOCUMENT_URL` | document_url |
\ No newline at end of file
diff --git a/docs/models/eventout.md b/docs/models/event.md
similarity index 98%
rename from docs/models/eventout.md
rename to docs/models/event.md
index d9202353..3eebffca 100644
--- a/docs/models/eventout.md
+++ b/docs/models/event.md
@@ -1,4 +1,4 @@
-# EventOut
+# Event
 
 
 ## Fields
diff --git a/docs/models/downloadfilerequest.md b/docs/models/filesapiroutesdeletefilerequest.md
similarity index 88%
rename from docs/models/downloadfilerequest.md
rename to docs/models/filesapiroutesdeletefilerequest.md
index 3f4dc6cc..1b02c2db 100644
--- a/docs/models/downloadfilerequest.md
+++ b/docs/models/filesapiroutesdeletefilerequest.md
@@ -1,4 +1,4 @@
-# DownloadFileRequest
+# FilesAPIRoutesDeleteFileRequest
 
 
 ## Fields
diff --git a/docs/models/retrievefilerequest.md b/docs/models/filesapiroutesdownloadfilerequest.md
similarity index 88%
rename from docs/models/retrievefilerequest.md
rename to docs/models/filesapiroutesdownloadfilerequest.md
index 454b9665..8b28cb0e 100644
--- a/docs/models/retrievefilerequest.md
+++ b/docs/models/filesapiroutesdownloadfilerequest.md
@@ -1,4 +1,4 @@
-# RetrieveFileRequest
+# FilesAPIRoutesDownloadFileRequest
 
 
 ## Fields
diff --git a/docs/models/getfilesignedurlrequest.md b/docs/models/filesapiroutesgetsignedurlrequest.md
similarity index 96%
rename from docs/models/getfilesignedurlrequest.md
rename to docs/models/filesapiroutesgetsignedurlrequest.md
index 0be3b288..dbe3c801 100644
--- a/docs/models/getfilesignedurlrequest.md
+++ b/docs/models/filesapiroutesgetsignedurlrequest.md
@@ -1,4 +1,4 @@
-# GetFileSignedURLRequest
+# FilesAPIRoutesGetSignedURLRequest
 
 
 ## Fields
diff --git a/docs/models/listfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md
similarity index 98%
rename from docs/models/listfilesrequest.md
rename to docs/models/filesapirouteslistfilesrequest.md
index 2d76a76b..57d11722 100644
--- a/docs/models/listfilesrequest.md
+++ b/docs/models/filesapirouteslistfilesrequest.md
@@ -1,4 +1,4 @@
-# ListFilesRequest
+# FilesAPIRoutesListFilesRequest
 
 
 ## Fields
diff --git a/docs/models/deletefilerequest.md b/docs/models/filesapiroutesretrievefilerequest.md
similarity index 88%
rename from docs/models/deletefilerequest.md
rename to docs/models/filesapiroutesretrievefilerequest.md
index bceae901..961bae1f 100644
--- a/docs/models/deletefilerequest.md
+++ b/docs/models/filesapiroutesretrievefilerequest.md
@@ -1,4 +1,4 @@
-# DeleteFileRequest
+# FilesAPIRoutesRetrieveFileRequest
 
 
 ## Fields
diff --git a/docs/models/ftmodelcapabilitiesout.md b/docs/models/finetunedmodelcapabilities.md
similarity index 95%
rename from docs/models/ftmodelcapabilitiesout.md
rename to docs/models/finetunedmodelcapabilities.md
index 19690476..d3203a2a 100644
--- a/docs/models/ftmodelcapabilitiesout.md
+++ b/docs/models/finetunedmodelcapabilities.md
@@ -1,4 +1,4 @@
-# FTModelCapabilitiesOut
+# FineTunedModelCapabilities
 
 
 ## Fields
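A brief sketch of the file operations wrapped by the renamed `FilesAPIRoutes*` request models above. `files.retrieve` and `files.download` follow the SDK's existing surface and are assumptions here; the file ID is a placeholder.

```python
from mistralai import Mistral

client = Mistral(api_key="<your-api-key>")  # placeholder key

# Metadata lookup (response model renamed to GetFileResponse in this patch).
meta = client.files.retrieve(file_id="<file-id>")
print(meta.filename, meta.purpose)

# Raw content download for the same file.
blob = client.files.download(file_id="<file-id>")
```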
diff --git a/docs/models/functioncallentry.md b/docs/models/functioncallentry.md
index fd3aa5c5..2843db9d 100644
--- a/docs/models/functioncallentry.md
+++ b/docs/models/functioncallentry.md
@@ -3,13 +3,16 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `object` | [Optional[models.FunctionCallEntryObject]](../models/functioncallentryobject.md) | :heavy_minus_sign: | N/A |
-| `type` | [Optional[models.FunctionCallEntryType]](../models/functioncallentrytype.md) | :heavy_minus_sign: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
-| `tool_call_id` | *str* | :heavy_check_mark: | N/A |
-| `name` | *str* | :heavy_check_mark: | N/A |
-| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A |
+| `type` | *Optional[Literal["function.call"]]* | :heavy_minus_sign: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `tool_call_id` | *str* | :heavy_check_mark: | N/A |
+| `name` | *str* | :heavy_check_mark: | N/A |
+| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A |
+| `confirmation_status` | [OptionalNullable[models.FunctionCallEntryConfirmationStatus]](../models/functioncallentryconfirmationstatus.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/functioncallentryconfirmationstatus.md b/docs/models/functioncallentryconfirmationstatus.md
new file mode 100644
index 00000000..8948beb6
--- /dev/null
+++ b/docs/models/functioncallentryconfirmationstatus.md
@@ -0,0 +1,10 @@
+# FunctionCallEntryConfirmationStatus
+
+
+## Values
+
+| Name | Value |
+| --- | --- |
+| `PENDING` | pending |
+| `ALLOWED` | allowed |
+| `DENIED` | denied |
\ No newline at end of file
diff --git a/docs/models/functioncallentryobject.md b/docs/models/functioncallentryobject.md
deleted file mode 100644
index 3cf2e427..00000000
--- a/docs/models/functioncallentryobject.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# FunctionCallEntryObject
-
-
-## Values
-
-| Name | Value |
-| --- | --- |
-| `ENTRY` | entry |
\ No newline at end of file
diff --git a/docs/models/functioncallentrytype.md b/docs/models/functioncallentrytype.md
deleted file mode 100644
index 7ea34c52..00000000
--- a/docs/models/functioncallentrytype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# FunctionCallEntryType
-
-
-## Values
-
-| Name | Value |
-| --- | --- |
-| `FUNCTION_CALL` | function.call |
\ No newline at end of file
diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md
index f4062060..0e3a36d6 100644
--- a/docs/models/functioncallevent.md
+++ b/docs/models/functioncallevent.md
@@ -3,12 +3,15 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | *Literal["function.call.delta"]* | :heavy_check_mark: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A |
-| `id` | *str* | :heavy_check_mark: | N/A |
-| `name` | *str* | :heavy_check_mark: | N/A |
-| `tool_call_id` | *str* | :heavy_check_mark: | N/A |
-| `arguments` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Literal["function.call.delta"]* | :heavy_check_mark: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A |
+| `id` | *str* | :heavy_check_mark: | N/A |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `name` | *str* | :heavy_check_mark: | N/A |
+| `tool_call_id` | *str* | :heavy_check_mark: | N/A |
+| `arguments` | *str* | :heavy_check_mark: | N/A |
+| `confirmation_status` | [OptionalNullable[models.FunctionCallEventConfirmationStatus]](../models/functioncalleventconfirmationstatus.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/functioncalleventconfirmationstatus.md b/docs/models/functioncalleventconfirmationstatus.md
new file mode 100644
index 00000000..4a3c8774
--- /dev/null
+++ b/docs/models/functioncalleventconfirmationstatus.md
@@ -0,0 +1,10 @@
+# FunctionCallEventConfirmationStatus
+
+
+## Values
+
+| Name | Value |
+| --- | --- |
+| `PENDING` | pending |
+| `ALLOWED` | allowed |
+| `DENIED` | denied |
\ No newline at end of file
diff --git a/docs/models/functionresultentry.md b/docs/models/functionresultentry.md
index 6df54d3d..6a77abfd 100644
--- a/docs/models/functionresultentry.md
+++ b/docs/models/functionresultentry.md
@@ -3,12 +3,12 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `object` | [Optional[models.FunctionResultEntryObject]](../models/functionresultentryobject.md) | :heavy_minus_sign: | N/A |
-| `type` | [Optional[models.FunctionResultEntryType]](../models/functionresultentrytype.md) | :heavy_minus_sign: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
-| `tool_call_id` | *str* | :heavy_check_mark: | N/A |
-| `result` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A |
+| `type` | *Optional[Literal["function.result"]]* | :heavy_minus_sign: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `tool_call_id` | *str* | :heavy_check_mark: | N/A |
+| `result` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/functionresultentryobject.md b/docs/models/functionresultentryobject.md
deleted file mode 100644
index fe52e0a5..00000000
--- a/docs/models/functionresultentryobject.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# FunctionResultEntryObject
-
-
-## Values
-
-| Name | Value |
-| --- | --- |
-| `ENTRY` | entry |
\ No newline at end of file
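A sketch of inspecting the new `confirmation_status` field (pending / allowed / denied) on function-call entries returned by a conversation. The `beta.conversations.start` call matches the SDK surface changed elsewhere in this patch; the agent ID is a placeholder, and the exact confirmation-flow semantics are an assumption, not documented here.

```python
from mistralai import Mistral

client = Mistral(api_key="<your-api-key>")  # placeholder key

conv = client.beta.conversations.start(
    agent_id="<agent-id>",
    inputs=[{"role": "user", "content": "Book me a flight to Paris."}],
)

# Entries of type "function.call" now carry confirmation_status;
# "pending" presumably means the call is awaiting user approval.
for entry in conv.outputs:
    if getattr(entry, "type", None) == "function.call":
        print(entry.name, entry.arguments, entry.confirmation_status)
```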
diff --git a/docs/models/functionresultentrytype.md b/docs/models/functionresultentrytype.md
deleted file mode 100644
index 35c94d8e..00000000
--- a/docs/models/functionresultentrytype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# FunctionResultEntryType
-
-
-## Values
-
-| Name | Value |
-| --- | --- |
-| `FUNCTION_RESULT` | function.result |
\ No newline at end of file
diff --git a/docs/models/getdocumenttextcontentrequest.md b/docs/models/getdocumenttextcontentrequest.md
deleted file mode 100644
index 85933401..00000000
--- a/docs/models/getdocumenttextcontentrequest.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# GetDocumentTextContentRequest
-
-
-## Fields
-
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `library_id` | *str* | :heavy_check_mark: | N/A |
-| `document_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/retrievefileout.md b/docs/models/getfileresponse.md
similarity index 99%
rename from docs/models/retrievefileout.md
rename to docs/models/getfileresponse.md
index 28f97dd2..0edd13e0 100644
--- a/docs/models/retrievefileout.md
+++ b/docs/models/getfileresponse.md
@@ -1,4 +1,4 @@
-# RetrieveFileOut
+# GetFileResponse
 
 
 ## Fields
diff --git a/docs/models/getfinetuningjobresponse.md b/docs/models/getfinetuningjobresponse.md
deleted file mode 100644
index 1b0568dd..00000000
--- a/docs/models/getfinetuningjobresponse.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# GetFineTuningJobResponse
-
-OK
-
-
-## Supported Types
-
-### `models.ClassifierDetailedJobOut`
-
-```python
-value: models.ClassifierDetailedJobOut = /* values here */
-```
-
-### `models.CompletionDetailedJobOut`
-
-```python
-value: models.CompletionDetailedJobOut = /* values here */
-```
-
diff --git a/docs/models/filesignedurl.md b/docs/models/getsignedurlresponse.md
similarity index 92%
rename from docs/models/filesignedurl.md
rename to docs/models/getsignedurlresponse.md
index 52ce3f4f..bde69323 100644
--- a/docs/models/filesignedurl.md
+++ b/docs/models/getsignedurlresponse.md
@@ -1,4 +1,4 @@
-# FileSignedURL
+# GetSignedURLResponse
 
 
 ## Fields
diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepository.md
similarity index 97%
rename from docs/models/githubrepositoryout.md
rename to docs/models/githubrepository.md
index fe38393a..827b6f34 100644
--- a/docs/models/githubrepositoryout.md
+++ b/docs/models/githubrepository.md
@@ -1,4 +1,4 @@
-# GithubRepositoryOut
+# GithubRepository
 
 
 ## Fields
diff --git a/docs/models/hyperparameters.md b/docs/models/hyperparameters.md
index 46a6dd6b..b6c00c36 100644
--- a/docs/models/hyperparameters.md
+++ b/docs/models/hyperparameters.md
@@ -3,15 +3,15 @@
 
 ## Supported Types
 
-### `models.CompletionTrainingParametersIn`
+### `models.CompletionTrainingParameters`
 
 ```python
-value: models.CompletionTrainingParametersIn = /* values here */
+value: models.CompletionTrainingParameters = /* values here */
 ```
 
-### `models.ClassifierTrainingParametersIn`
+### `models.ClassifierTrainingParametersIn`
 
 ```python
-value: models.ClassifierTrainingParametersIn = /* values here */
+value: models.ClassifierTrainingParameters = /* values here */
 ```
 
diff --git a/docs/models/imagedetail.md b/docs/models/imagedetail.md
new file mode 100644
index 00000000..1e5ba3fd
--- /dev/null
+++ b/docs/models/imagedetail.md
@@ -0,0 +1,10 @@
+# ImageDetail
+
+
+## Values
+
+| Name | Value |
+| --- | --- |
+| `LOW` | low |
+| `AUTO` | auto |
+| `HIGH` | high |
\ No newline at end of file
diff --git a/docs/models/imagegenerationtool.md b/docs/models/imagegenerationtool.md
index 0c8de72c..b476b6f2 100644
--- a/docs/models/imagegenerationtool.md
+++ b/docs/models/imagegenerationtool.md
@@ -3,6 +3,7 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `type` | *Literal["image_generation"]* | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A |
+| `type` | *Literal["image_generation"]* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/imageurl.md b/docs/models/imageurl.md
index 7c2bcbc3..6358e0ac 100644
--- a/docs/models/imageurl.md
+++ b/docs/models/imageurl.md
@@ -3,7 +3,7 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `url` | *str* | :heavy_check_mark: | N/A |
-| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `url` | *str* | :heavy_check_mark: | N/A |
+| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/imageurlchunk.md b/docs/models/imageurlchunk.md
index 43078c78..db0c53d2 100644
--- a/docs/models/imageurlchunk.md
+++ b/docs/models/imageurlchunk.md
@@ -5,7 +5,7 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A |
-| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | *Optional[Literal["image_url"]]* | :heavy_minus_sign: | N/A |
+| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
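A sketch of passing the newly typed `detail` field (now the `ImageDetail` enum: low / auto / high, instead of a free-form string) inside an `ImageURLChunk`. The `chat.complete` call follows the SDK's existing surface; the model name and URL are placeholders.

```python
from mistralai import Mistral

client = Mistral(api_key="<your-api-key>")  # placeholder key

resp = client.chat.complete(
    model="pixtral-large-latest",  # placeholder vision-capable model
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},
            # image_url accepts the ImageURL object form, where detail
            # must now be one of "low" | "auto" | "high".
            {"type": "image_url", "image_url": {"url": "https://example.com/cat.png", "detail": "low"}},
        ],
    }],
)
print(resp.choices[0].message.content)
```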
diff --git a/docs/models/imageurlchunktype.md b/docs/models/imageurlchunktype.md
deleted file mode 100644
index 2064a0b4..00000000
--- a/docs/models/imageurlchunktype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ImageURLChunkType
-
-
-## Values
-
-| Name | Value |
-| --- | --- |
-| `IMAGE_URL` | image_url |
\ No newline at end of file
diff --git a/docs/models/inputs.md b/docs/models/inputs.md
index 0f62a7ce..d5771207 100644
--- a/docs/models/inputs.md
+++ b/docs/models/inputs.md
@@ -5,10 +5,10 @@ Chat to classify
 
 ## Supported Types
 
-### `models.InstructRequestInputs`
+### `models.InstructRequest`
 
 ```python
-value: models.InstructRequestInputs = /* values here */
+value: models.InstructRequest = /* values here */
 ```
 
 ### `List[models.InstructRequest]`
diff --git a/docs/models/inputsmessage.md b/docs/models/inputsmessage.md
deleted file mode 100644
index e3543fb4..00000000
--- a/docs/models/inputsmessage.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# InputsMessage
-
-
-## Supported Types
-
-### `models.AssistantMessage`
-
-```python
-value: models.AssistantMessage = /* values here */
-```
-
-### `models.SystemMessage`
-
-```python
-value: models.SystemMessage = /* values here */
-```
-
-### `models.ToolMessage`
-
-```python
-value: models.ToolMessage = /* values here */
-```
-
-### `models.UserMessage`
-
-```python
-value: models.UserMessage = /* values here */
-```
-
diff --git a/docs/models/instructrequestinputs.md b/docs/models/instructrequestinputs.md
deleted file mode 100644
index 931ae5e4..00000000
--- a/docs/models/instructrequestinputs.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# InstructRequestInputs
-
-
-## Fields
-
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `messages` | List[[models.InputsMessage](../models/inputsmessage.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/jobmetadataout.md b/docs/models/jobmetadata.md
similarity index 98%
rename from docs/models/jobmetadataout.md
rename to docs/models/jobmetadata.md
index 6218a161..5d8a89dd 100644
--- a/docs/models/jobmetadataout.md
+++ b/docs/models/jobmetadata.md
@@ -1,4 +1,4 @@
-# JobMetadataOut
+# JobMetadata
 
 
 ## Fields
diff --git a/docs/models/cancelbatchjobrequest.md b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md
similarity index 86%
rename from docs/models/cancelbatchjobrequest.md
rename to docs/models/jobsapiroutesbatchcancelbatchjobrequest.md
index f31f843b..c19d0241 100644
--- a/docs/models/cancelbatchjobrequest.md
+++ b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md
@@ -1,4 +1,4 @@
-# CancelBatchJobRequest
+# JobsAPIRoutesBatchCancelBatchJobRequest
 
 
 ## Fields
diff --git a/docs/models/getbatchjobrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md
similarity index 92%
rename from docs/models/getbatchjobrequest.md
rename to docs/models/jobsapiroutesbatchgetbatchjobrequest.md
index f3c67eb4..8c259bea 100644
--- a/docs/models/getbatchjobrequest.md
+++ b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md
@@ -1,4 +1,4 @@
-# GetBatchJobRequest
+# JobsAPIRoutesBatchGetBatchJobRequest
 
 
 ## Fields
diff --git a/docs/models/listbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md
similarity index 98%
rename from docs/models/listbatchjobsrequest.md
rename to docs/models/jobsapiroutesbatchgetbatchjobsrequest.md
index 19981b24..5ceb0b2c 100644
--- a/docs/models/listbatchjobsrequest.md
+++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md
@@ -1,4 +1,4 @@
-# ListBatchJobsRequest
+# JobsAPIRoutesBatchGetBatchJobsRequest
 
 
 ## Fields
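A sketch of the `Inputs` union after the rename: a single `InstructRequest` (a dict carrying `messages`) or a list of them. The `classifiers.classify_chat` call and the classifier model name are assumptions based on the SDK's existing shape.

```python
from mistralai import Mistral

client = Mistral(api_key="<your-api-key>")  # placeholder key

# A single InstructRequest; a list of these is also accepted by the union.
result = client.classifiers.classify_chat(
    model="<your-classifier-model>",  # placeholder fine-tuned classifier
    inputs={"messages": [{"role": "user", "content": "I love this product!"}]},
)
print(result)
```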
diff --git a/docs/models/archivemodelrequest.md b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md
similarity index 93%
rename from docs/models/archivemodelrequest.md
rename to docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md
index 806d135e..f9700df5 100644
--- a/docs/models/archivemodelrequest.md
+++ b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md
@@ -1,4 +1,4 @@
-# ArchiveModelRequest
+# JobsAPIRoutesFineTuningArchiveFineTunedModelRequest
 
 
 ## Fields
diff --git a/docs/models/cancelfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md
similarity index 88%
rename from docs/models/cancelfinetuningjobrequest.md
rename to docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md
index 6525788c..883cbac6 100644
--- a/docs/models/cancelfinetuningjobrequest.md
+++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md
@@ -1,4 +1,4 @@
-# CancelFineTuningJobRequest
+# JobsAPIRoutesFineTuningCancelFineTuningJobRequest
 
 
 ## Fields
diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md
new file mode 100644
index 00000000..fb62eb62
--- /dev/null
+++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md
@@ -0,0 +1,19 @@
+# JobsAPIRoutesFineTuningCancelFineTuningJobResponse
+
+OK
+
+
+## Supported Types
+
+### `models.ClassifierFineTuningJobDetails`
+
+```python
+value: models.ClassifierFineTuningJobDetails = /* values here */
+```
+
+### `models.CompletionFineTuningJobDetails`
+
+```python
+value: models.CompletionFineTuningJobDetails = /* values here */
+```
+
diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md
new file mode 100644
index 00000000..7b52e2ca
--- /dev/null
+++ b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md
@@ -0,0 +1,19 @@
+# JobsAPIRoutesFineTuningCreateFineTuningJobResponse
+
+OK
+
+
+## Supported Types
+
+### `models.Response`
+
+```python
+value: models.Response = /* values here */
+```
+
+### `models.LegacyJobMetadata`
+
+```python
+value: models.LegacyJobMetadata = /* values here */
+```
+
diff --git a/docs/models/getfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md
similarity index 89%
rename from docs/models/getfinetuningjobrequest.md
rename to docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md
index f20cb214..fde19800 100644
--- a/docs/models/getfinetuningjobrequest.md
+++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md
@@ -1,4 +1,4 @@
-# GetFineTuningJobRequest
+# JobsAPIRoutesFineTuningGetFineTuningJobRequest
 
 
 ## Fields
diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md
new file mode 100644
index 00000000..f7705327
--- /dev/null
+++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md
@@ -0,0 +1,19 @@
+# JobsAPIRoutesFineTuningGetFineTuningJobResponse
+
+OK
+
+
+## Supported Types
+
+### `models.ClassifierFineTuningJobDetails`
+
+```python
+value: models.ClassifierFineTuningJobDetails = /* values here */
+```
+
+### `models.CompletionFineTuningJobDetails`
+
+```python
+value: models.CompletionFineTuningJobDetails = /* values here */
+```
+
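A sketch of handling the union now returned by cancel (and, per the docs above, get): `ClassifierFineTuningJobDetails | CompletionFineTuningJobDetails`. The `fine_tuning.jobs.cancel` call follows the SDK's existing surface; the job ID is a placeholder.

```python
from mistralai import Mistral, models

client = Mistral(api_key="<your-api-key>")  # placeholder key

job = client.fine_tuning.jobs.cancel(job_id="<job-id>")

# Branch on the union member before touching variant-specific fields.
if isinstance(job, models.ClassifierFineTuningJobDetails):
    print("classifier job:", job.status)
elif isinstance(job, models.CompletionFineTuningJobDetails):
    print("completion job:", job.status)
```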
diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md
new file mode 100644
index 00000000..23c52c34
--- /dev/null
+++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md
@@ -0,0 +1,17 @@
+# JobsAPIRoutesFineTuningGetFineTuningJobsRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. |
+| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. |
+| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. |
+| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. |
+| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. |
+| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. |
+| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases run name to filter on. When set, the other results are not displayed. |
+| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. |
\ No newline at end of file
diff --git a/docs/models/listfinetuningjobsstatus.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md
similarity index 94%
rename from docs/models/listfinetuningjobsstatus.md
rename to docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md
index 07db9ae5..40d57686 100644
--- a/docs/models/listfinetuningjobsstatus.md
+++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md
@@ -1,4 +1,4 @@
-# ListFineTuningJobsStatus
+# JobsAPIRoutesFineTuningGetFineTuningJobsStatus
 
 The current job state to filter on. When set, the other results are not displayed.
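A sketch of listing jobs with the filters from the request table above. `fine_tuning.jobs.list` follows the SDK's existing surface; the `"RUNNING"` status value and the dates are placeholders, with valid values defined by the renamed status enum.

```python
from datetime import datetime
from mistralai import Mistral

client = Mistral(api_key="<your-api-key>")  # placeholder key

# Filters are combined; per the docs, each filter hides non-matching results.
jobs = client.fine_tuning.jobs.list(
    page=0,
    page_size=20,
    status="RUNNING",       # placeholder enum value
    created_by_me=True,
    created_after=datetime(2026, 1, 1),
)
print(jobs.total)
```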
diff --git a/docs/models/startfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md
similarity index 84%
rename from docs/models/startfinetuningjobrequest.md
rename to docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md
index 9df5aee8..4429fe48 100644
--- a/docs/models/startfinetuningjobrequest.md
+++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md
@@ -1,4 +1,4 @@
-# StartFineTuningJobRequest
+# JobsAPIRoutesFineTuningStartFineTuningJobRequest
 
 
 ## Fields
diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md
new file mode 100644
index 00000000..1a7e71d4
--- /dev/null
+++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md
@@ -0,0 +1,19 @@
+# JobsAPIRoutesFineTuningStartFineTuningJobResponse
+
+OK
+
+
+## Supported Types
+
+### `models.ClassifierFineTuningJobDetails`
+
+```python
+value: models.ClassifierFineTuningJobDetails = /* values here */
+```
+
+### `models.CompletionFineTuningJobDetails`
+
+```python
+value: models.CompletionFineTuningJobDetails = /* values here */
+```
+
diff --git a/docs/models/unarchivemodelrequest.md b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md
similarity index 92%
rename from docs/models/unarchivemodelrequest.md
rename to docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md
index 033dad8a..95c1734d 100644
--- a/docs/models/unarchivemodelrequest.md
+++ b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md
@@ -1,4 +1,4 @@
-# UnarchiveModelRequest
+# JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest
 
 
 ## Fields
diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md
new file mode 100644
index 00000000..dbe49a86
--- /dev/null
+++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md
@@ -0,0 +1,9 @@
+# JobsAPIRoutesFineTuningUpdateFineTunedModelRequest
+
+
+## Fields
+
+| Field | Type | Required | Description | Example |
+| --- | --- | --- | --- | --- |
+| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. | ft:open-mistral-7b:587a6b29:20240514:7e773925 |
+| `update_model_request` | [models.UpdateModelRequest](../models/updatemodelrequest.md) | :heavy_check_mark: | N/A | |
\ No newline at end of file
diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md
new file mode 100644
index 00000000..f40350bf
--- /dev/null
+++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md
@@ -0,0 +1,19 @@
+# JobsAPIRoutesFineTuningUpdateFineTunedModelResponse
+
+OK
+
+
+## Supported Types
+
+### `models.ClassifierFineTunedModel`
+
+```python
+value: models.ClassifierFineTunedModel = /* values here */
+```
+
+### `models.CompletionFineTunedModel`
+
+```python
+value: models.CompletionFineTunedModel = /* values here */
+```
+
diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md
deleted file mode 100644
index 69f8342a..00000000
--- a/docs/models/jobsout.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# JobsOut
-
-
-## Fields
-
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A |
-| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A |
-| `total` | *int* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/jobsoutdata.md b/docs/models/jobsoutdata.md
deleted file mode 100644
index 28cec311..00000000
--- a/docs/models/jobsoutdata.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# JobsOutData
-
-
-## Supported Types
-
-### `models.ClassifierJobOut`
-
-```python
-value: models.ClassifierJobOut = /* values here */
-```
-
-### `models.CompletionJobOut`
-
-```python
-value: models.CompletionJobOut = /* values here */
-```
-
diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadata.md
similarity index 99%
rename from docs/models/legacyjobmetadataout.md
rename to docs/models/legacyjobmetadata.md
index 8a712140..4705ab4f 100644
--- a/docs/models/legacyjobmetadataout.md
+++ b/docs/models/legacyjobmetadata.md
@@ -1,4 +1,4 @@
-# LegacyJobMetadataOut
+# LegacyJobMetadata
 
 
 ## Fields
diff --git a/docs/models/getlibraryrequest.md b/docs/models/librariesdeletev1request.md
similarity index 90%
rename from docs/models/getlibraryrequest.md
rename to docs/models/librariesdeletev1request.md
index 2a3acf50..68d7e543 100644
--- a/docs/models/getlibraryrequest.md
+++ b/docs/models/librariesdeletev1request.md
@@ -1,4 +1,4 @@
-# GetLibraryRequest
+# LibrariesDeleteV1Request
 
 
 ## Fields
diff --git a/docs/models/getdocumentstatusrequest.md b/docs/models/librariesdocumentsdeletev1request.md
similarity index 90%
rename from docs/models/getdocumentstatusrequest.md
rename to docs/models/librariesdocumentsdeletev1request.md
index 3557d773..efccdb1b 100644
--- a/docs/models/getdocumentstatusrequest.md
+++ b/docs/models/librariesdocumentsdeletev1request.md
@@ -1,4 +1,4 @@
-# GetDocumentStatusRequest
+# LibrariesDocumentsDeleteV1Request
 
 
 ## Fields
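A sketch of the update operation wrapped by `JobsAPIRoutesFineTuningUpdateFineTunedModelRequest` above. Whether this is exposed as `client.models.update`, and the `name` field on `UpdateModelRequest`, are assumptions based on the SDK's current layout; the `model_id` value reuses the example from the table.

```python
from mistralai import Mistral

client = Mistral(api_key="<your-api-key>")  # placeholder key

# Response is the ClassifierFineTunedModel | CompletionFineTunedModel union
# documented just below.
updated = client.models.update(
    model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925",
    name="support-triage-v2",  # placeholder
)
print(updated)
```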
diff --git a/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md
new file mode 100644
index 00000000..14ca66f7
--- /dev/null
+++ b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md
@@ -0,0 +1,9 @@
+# LibrariesDocumentsGetExtractedTextSignedURLV1Request
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `library_id` | *str* | :heavy_check_mark: | N/A |
+| `document_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/librariesdocumentsgetsignedurlv1request.md b/docs/models/librariesdocumentsgetsignedurlv1request.md
new file mode 100644
index 00000000..7c08c180
--- /dev/null
+++ b/docs/models/librariesdocumentsgetsignedurlv1request.md
@@ -0,0 +1,9 @@
+# LibrariesDocumentsGetSignedURLV1Request
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `library_id` | *str* | :heavy_check_mark: | N/A |
+| `document_id` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getdocumentrequest.md b/docs/models/librariesdocumentsgetstatusv1request.md
similarity index 90%
rename from docs/models/getdocumentrequest.md
rename to docs/models/librariesdocumentsgetstatusv1request.md
index 29f62127..e6d41875 100644
--- a/docs/models/getdocumentrequest.md
+++ b/docs/models/librariesdocumentsgetstatusv1request.md
@@ -1,4 +1,4 @@
-# GetDocumentRequest
+# LibrariesDocumentsGetStatusV1Request
 
 
 ## Fields
diff --git a/docs/models/getdocumentextractedtextsignedurlrequest.md b/docs/models/librariesdocumentsgettextcontentv1request.md
similarity index 89%
rename from docs/models/getdocumentextractedtextsignedurlrequest.md
rename to docs/models/librariesdocumentsgettextcontentv1request.md
index ff703802..2f58a446 100644
--- a/docs/models/getdocumentextractedtextsignedurlrequest.md
+++ b/docs/models/librariesdocumentsgettextcontentv1request.md
@@ -1,4 +1,4 @@
-# GetDocumentExtractedTextSignedURLRequest
+# LibrariesDocumentsGetTextContentV1Request
 
 
 ## Fields
diff --git a/docs/models/getdocumentsignedurlrequest.md b/docs/models/librariesdocumentsgetv1request.md
similarity index 91%
rename from docs/models/getdocumentsignedurlrequest.md
rename to docs/models/librariesdocumentsgetv1request.md
index 72a179c0..6febc058 100644
--- a/docs/models/getdocumentsignedurlrequest.md
+++ b/docs/models/librariesdocumentsgetv1request.md
@@ -1,4 +1,4 @@
-# GetDocumentSignedURLRequest
+# LibrariesDocumentsGetV1Request
 
 
 ## Fields
diff --git a/docs/models/listdocumentsrequest.md b/docs/models/librariesdocumentslistv1request.md
similarity index 96%
rename from docs/models/listdocumentsrequest.md
rename to docs/models/librariesdocumentslistv1request.md
index 369e8edb..44f63001 100644
--- a/docs/models/listdocumentsrequest.md
+++ b/docs/models/librariesdocumentslistv1request.md
@@ -1,4 +1,4 @@
-# ListDocumentsRequest
+# LibrariesDocumentsListV1Request
 
 
 ## Fields
diff --git a/docs/models/deletedocumentrequest.md b/docs/models/librariesdocumentsreprocessv1request.md
similarity index 90%
rename from docs/models/deletedocumentrequest.md
rename to docs/models/librariesdocumentsreprocessv1request.md
index eb060099..196ba17b 100644
--- a/docs/models/deletedocumentrequest.md
+++ b/docs/models/librariesdocumentsreprocessv1request.md
@@ -1,4 +1,4 @@
-# DeleteDocumentRequest
+# LibrariesDocumentsReprocessV1Request
 
 
 ## Fields
diff --git a/docs/models/librariesdocumentsupdatev1request.md b/docs/models/librariesdocumentsupdatev1request.md
new file mode 100644
index 00000000..d4630850
--- /dev/null
+++ b/docs/models/librariesdocumentsupdatev1request.md
@@ -0,0 +1,10 @@
+# LibrariesDocumentsUpdateV1Request
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `library_id` | *str* | :heavy_check_mark: | N/A |
+| `document_id` | *str* | :heavy_check_mark: | N/A |
+| `update_document_request` | [models.UpdateDocumentRequest](../models/updatedocumentrequest.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/uploaddocumentrequest.md b/docs/models/librariesdocumentsuploadv1request.md
similarity index 96%
rename from docs/models/uploaddocumentrequest.md
rename to docs/models/librariesdocumentsuploadv1request.md
index 92152b7f..172a6183 100644
--- a/docs/models/uploaddocumentrequest.md
+++ b/docs/models/librariesdocumentsuploadv1request.md
@@ -1,4 +1,4 @@
-# UploadDocumentRequest
+# LibrariesDocumentsUploadV1Request
 
 
 ## Fields
diff --git a/docs/models/deletelibraryrequest.md b/docs/models/librariesgetv1request.md
similarity index 91%
rename from docs/models/deletelibraryrequest.md
rename to docs/models/librariesgetv1request.md
index c229ad73..6e1e04c3 100644
--- a/docs/models/deletelibraryrequest.md
+++ b/docs/models/librariesgetv1request.md
@@ -1,4 +1,4 @@
-# DeleteLibraryRequest
+# LibrariesGetV1Request
 
 
 ## Fields
diff --git a/docs/models/updateorcreatelibraryaccessrequest.md b/docs/models/librariessharecreatev1request.md
similarity index 95%
rename from docs/models/updateorcreatelibraryaccessrequest.md
rename to docs/models/librariessharecreatev1request.md
index e04567b4..4c05241d 100644
--- a/docs/models/updateorcreatelibraryaccessrequest.md
+++ b/docs/models/librariessharecreatev1request.md
@@ -1,4 +1,4 @@
-# UpdateOrCreateLibraryAccessRequest
+# LibrariesShareCreateV1Request
 
 
 ## Fields
diff --git a/docs/models/deletelibraryaccessrequest.md b/docs/models/librariessharedeletev1request.md
similarity index 96%
rename from docs/models/deletelibraryaccessrequest.md
rename to docs/models/librariessharedeletev1request.md
index c7034b98..850e22ab 100644
--- a/docs/models/deletelibraryaccessrequest.md
+++ b/docs/models/librariessharedeletev1request.md
@@ -1,4 +1,4 @@
-# DeleteLibraryAccessRequest
+# LibrariesShareDeleteV1Request
 
 
 ## Fields
diff --git a/docs/models/listlibraryaccessesrequest.md b/docs/models/librariessharelistv1request.md
similarity index 90%
rename from docs/models/listlibraryaccessesrequest.md
rename to docs/models/librariessharelistv1request.md
index d98bcda2..98bf6d17 100644
--- a/docs/models/listlibraryaccessesrequest.md
+++ b/docs/models/librariessharelistv1request.md
@@ -1,4 +1,4 @@
-# ListLibraryAccessesRequest
+# LibrariesShareListV1Request
 
 
 ## Fields
diff --git a/docs/models/librariesupdatev1request.md b/docs/models/librariesupdatev1request.md
new file mode 100644
index 00000000..c5c142db
--- /dev/null
+++ b/docs/models/librariesupdatev1request.md
@@ -0,0 +1,9 @@
+# LibrariesUpdateV1Request
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `library_id` | *str* | :heavy_check_mark: | N/A |
+| `update_library_request` | [models.UpdateLibraryRequest](../models/updatelibraryrequest.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
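A sketch of the document update wrapped by `LibrariesDocumentsUpdateV1Request` above: path IDs plus an `UpdateDocumentRequest` body. The `beta.libraries.documents.update` path and flattened keyword form are assumptions from the current beta SDK layout; the IDs and new name are placeholders.

```python
from mistralai import Mistral

client = Mistral(api_key="<your-api-key>")  # placeholder key

doc = client.beta.libraries.documents.update(
    library_id="<library-id>",
    document_id="<document-id>",
    name="renamed-report.pdf",  # field from UpdateDocumentRequest
)
print(doc.processing_status)
```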
diff --git a/docs/models/libraryout.md b/docs/models/library.md
similarity index 99%
rename from docs/models/libraryout.md
rename to docs/models/library.md
index ebf46d57..4319f43d 100644
--- a/docs/models/libraryout.md
+++ b/docs/models/library.md
@@ -1,4 +1,4 @@
-# LibraryOut
+# Library
 
 
 ## Fields
diff --git a/docs/models/libraryinupdate.md b/docs/models/libraryinupdate.md
deleted file mode 100644
index 4aa169c7..00000000
--- a/docs/models/libraryinupdate.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# LibraryInUpdate
-
-
-## Fields
-
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/listbatchjobsresponse.md b/docs/models/listbatchjobsresponse.md
new file mode 100644
index 00000000..c23e3220
--- /dev/null
+++ b/docs/models/listbatchjobsresponse.md
@@ -0,0 +1,10 @@
+# ListBatchJobsResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `data` | List[[models.BatchJob](../models/batchjob.md)] | :heavy_minus_sign: | N/A |
+| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A |
+| `total` | *int* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/listdocumentout.md b/docs/models/listdocumentsresponse.md
similarity index 90%
rename from docs/models/listdocumentout.md
rename to docs/models/listdocumentsresponse.md
index f14157b8..47b9d3b7 100644
--- a/docs/models/listdocumentout.md
+++ b/docs/models/listdocumentsresponse.md
@@ -1,4 +1,4 @@
-# ListDocumentOut
+# ListDocumentsResponse
 
 
 ## Fields
@@ -6,4 +6,4 @@
 | Field | Type | Required | Description |
 | --- | --- | --- | --- |
 | `pagination` | [models.PaginationInfo](../models/paginationinfo.md) | :heavy_check_mark: | N/A |
-| `data` | List[[models.DocumentOut](../models/documentout.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
+| `data` | List[[models.Document](../models/document.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/listfilesout.md b/docs/models/listfilesresponse.md
similarity index 98%
rename from docs/models/listfilesout.md
rename to docs/models/listfilesresponse.md
index bcb1f13a..802f685f 100644
--- a/docs/models/listfilesout.md
+++ b/docs/models/listfilesresponse.md
@@ -1,4 +1,4 @@
-# ListFilesOut
+# ListFilesResponse
 
 
 ## Fields
diff --git a/docs/models/listfinetuningjobsrequest.md b/docs/models/listfinetuningjobsrequest.md
deleted file mode 100644
index 3a04fc70..00000000
--- a/docs/models/listfinetuningjobsrequest.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# ListFineTuningJobsRequest
-
-
-## Fields
-
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. |
-| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. |
-| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. |
-| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. |
-| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. |
-| `status` | [OptionalNullable[models.ListFineTuningJobsStatus]](../models/listfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. |
-| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. |
-| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. |
-| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. |
\ No newline at end of file
diff --git a/docs/models/listfinetuningjobsresponse.md b/docs/models/listfinetuningjobsresponse.md
new file mode 100644
index 00000000..00251242
--- /dev/null
+++ b/docs/models/listfinetuningjobsresponse.md
@@ -0,0 +1,10 @@
+# ListFineTuningJobsResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `data` | List[[models.ListFineTuningJobsResponseData](../models/listfinetuningjobsresponsedata.md)] | :heavy_minus_sign: | N/A |
+| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A |
+| `total` | *int* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/listfinetuningjobsresponsedata.md b/docs/models/listfinetuningjobsresponsedata.md
new file mode 100644
index 00000000..adb06444
--- /dev/null
+++ b/docs/models/listfinetuningjobsresponsedata.md
@@ -0,0 +1,17 @@
+# ListFineTuningJobsResponseData
+
+
+## Supported Types
+
+### `models.ClassifierFineTuningJob`
+
+```python
+value: models.ClassifierFineTuningJob = /* values here */
+```
+
+### `models.CompletionFineTuningJob`
+
+```python
+value: models.CompletionFineTuningJob = /* values here */
+```
+
diff --git a/docs/models/listlibrariesresponse.md b/docs/models/listlibrariesresponse.md
new file mode 100644
index 00000000..e21b9ced
--- /dev/null
+++ b/docs/models/listlibrariesresponse.md
@@ -0,0 +1,8 @@
+# ListLibrariesResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `data` | List[[models.Library](../models/library.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/listlibraryout.md b/docs/models/listlibraryout.md
deleted file mode 100644
index db76ffa1..00000000
--- a/docs/models/listlibraryout.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ListLibraryOut
-
-
-## Fields
-
-| Field | Type | Required | Description |
-| --- | --- | --- | --- |
-| `data` | List[[models.LibraryOut](../models/libraryout.md)] | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/messageinputcontentchunks.md b/docs/models/messageinputcontentchunks.md
index 4fd18a0d..05617850 100644
--- a/docs/models/messageinputcontentchunks.md
+++ b/docs/models/messageinputcontentchunks.md
@@ -27,9 +27,9 @@ value: models.ToolFileChunk = /* values here */
 value: models.DocumentURLChunk = /* values here */
 ```
 
-### `models.ThinkChunk`
+### `models.ConversationThinkChunk`
 
 ```python
-value: models.ThinkChunk = /* values here */
+value: models.ConversationThinkChunk = /* values here */
 ```
 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.Role](../models/role.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentryobject.md b/docs/models/messageinputentryobject.md deleted file mode 100644 index 6bdd62e2..00000000 --- a/docs/models/messageinputentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md deleted file mode 100644 index d3378124..00000000 --- a/docs/models/messageinputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md index d9c3d50e..c4a7777e 100644 --- a/docs/models/messageoutputcontentchunks.md +++ b/docs/models/messageoutputcontentchunks.md @@ -27,10 +27,10 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ThinkChunk` +### `models.ConversationThinkChunk` ```python -value: models.ThinkChunk = /* values here */ +value: models.ConversationThinkChunk = /* values here */ ``` ### `models.ToolReferenceChunk` diff --git a/docs/models/messageoutputentry.md b/docs/models/messageoutputentry.md index 5b42e20d..73a1c666 100644 --- a/docs/models/messageoutputentry.md +++ b/docs/models/messageoutputentry.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `object` | [Optional[models.MessageOutputEntryObject]](../models/messageoutputentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageOutputEntryType]](../models/messageoutputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.MessageOutputEntryRole]](../models/messageoutputentryrole.md) | :heavy_minus_sign: | N/A | -| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of 
file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["message.output"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputentryobject.md b/docs/models/messageoutputentryobject.md deleted file mode 100644 index bb254c82..00000000 --- a/docs/models/messageoutputentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/messageoutputentryrole.md b/docs/models/messageoutputentryrole.md deleted file mode 100644 index 783ee0aa..00000000 --- a/docs/models/messageoutputentryrole.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/messageoutputentrytype.md b/docs/models/messageoutputentrytype.md deleted file mode 100644 index cb4a7a1b..00000000 --- a/docs/models/messageoutputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryType - - -## Values - -| Name | Value | -| ---------------- | ---------------- | -| `MESSAGE_OUTPUT` | message.output | \ No newline at end of file diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md index b0fa1a2d..e09a965f 100644 --- a/docs/models/messageoutputevent.md +++ b/docs/models/messageoutputevent.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | *Literal["message.output.delta"]* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | 
[Optional[models.MessageOutputEventRole]](../models/messageoutputeventrole.md) | :heavy_minus_sign: | N/A | -| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `type` | *Literal["message.output.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputeventrole.md b/docs/models/messageoutputeventrole.md deleted file mode 100644 index e38c6472..00000000 --- a/docs/models/messageoutputeventrole.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEventRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/metricout.md b/docs/models/metric.md similarity index 98% rename from docs/models/metricout.md rename to docs/models/metric.md index 3c552bac..7f863036 100644 --- a/docs/models/metricout.md +++ b/docs/models/metric.md @@ -1,4 +1,4 @@ -# MetricOut +# Metric Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md index 813e1f3a..af2e5c61 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -3,16 +3,16 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.ModelConversationTool](../models/modelconversationtool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. 
|
-| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. |
-| `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A |
-| `id` | *str* | :heavy_check_mark: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A |
-| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A |
-| `model` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ------------------------ | ------------------------ | ------------------------ | ------------------------ |
+| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
+| `tools` | List[[models.ModelConversationTool](../models/modelconversationtool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
+| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
+| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of what the conversation is about. |
+| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. |
+| `object` | *Optional[Literal["conversation"]]* | :heavy_minus_sign: | N/A |
+| `id` | *str* | :heavy_check_mark: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A |
+| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A |
+| `model` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/modelconversationobject.md b/docs/models/modelconversationobject.md
deleted file mode 100644
index ead1fa26..00000000
--- a/docs/models/modelconversationobject.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ModelConversationObject
-
-
-## Values
-
-| Name | Value |
-| -------------- | -------------- |
-| `CONVERSATION` | conversation |
\ No newline at end of file
diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md
index 87929e53..dd3fc2ea 100644
--- a/docs/models/ocrrequest.md
+++ b/docs/models/ocrrequest.md
@@ -3,18 +3,18 @@
 
 ## Fields
 
-| Field | Type | Required | Description | Example |
-| ------------------------ | ------------------------ | ------------------------ | ------------------------ | ------------------------ |
-| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | |
-| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | |
-| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | |
-| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | |
-| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | |
-| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | |
-| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | |
-| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {&#xA;"type": "text"&#xA;} |
-| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {&#xA;"type": "text"&#xA;} |
-| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | |
-| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | |
-| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
-| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
\ No newline at end of file
+| Field | Type | Required | Description | Example |
+| ------------------------ | ------------------------ | ------------------------ | ------------------------ | ------------------------ |
+| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | |
+| `document` | [models.DocumentUnion](../models/documentunion.md) | :heavy_check_mark: | Document to run OCR on | |
+| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. &#xA;Starts from 0 | |
+| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | |
+| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | |
+| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | |
+| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {&#xA;"type": "text"&#xA;}&#xA;**Example 2:** {&#xA;"type": "json_object"&#xA;}&#xA;**Example 3:** {&#xA;"type": "json_schema",&#xA;"json_schema": {&#xA;"schema": {&#xA;"properties": {&#xA;"name": {&#xA;"title": "Name",&#xA;"type": "string"&#xA;},&#xA;"authors": {&#xA;"items": {&#xA;"type": "string"&#xA;},&#xA;"title": "Authors",&#xA;"type": "array"&#xA;}&#xA;},&#xA;"required": [&#xA;"name",&#xA;"authors"&#xA;],&#xA;"title": "Book",&#xA;"type": "object",&#xA;"additionalProperties": false&#xA;},&#xA;"name": "book",&#xA;"strict": true&#xA;}&#xA;} |
+| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {&#xA;"type": "text"&#xA;}&#xA;**Example 2:** {&#xA;"type": "json_object"&#xA;}&#xA;**Example 3:** {&#xA;"type": "json_schema",&#xA;"json_schema": {&#xA;"schema": {&#xA;"properties": {&#xA;"name": {&#xA;"title": "Name",&#xA;"type": "string"&#xA;},&#xA;"authors": {&#xA;"items": {&#xA;"type": "string"&#xA;},&#xA;"title": "Authors",&#xA;"type": "array"&#xA;}&#xA;},&#xA;"required": [&#xA;"name",&#xA;"authors"&#xA;],&#xA;"title": "Book",&#xA;"type": "object",&#xA;"additionalProperties": false&#xA;},&#xA;"name": "book",&#xA;"strict": true&#xA;}&#xA;} |
+| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | |
+| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | |
+| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
+| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | |
\ No newline at end of file
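The `OCRRequest` changes above (the `DocumentUnion` rename and the expanded annotation-format examples) are easiest to read as a call sketch. The `client.ocr.process` entry point is part of this SDK, but the model name, document URL, and schema payload below are illustrative assumptions rather than values taken from this patch:

```python
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

ocr_response = client.ocr.process(
    model="mistral-ocr-latest",  # assumed model name
    document={
        "type": "document_url",
        "document_url": "https://example.com/sample.pdf",  # hypothetical document
    },
    pages=[0, 1],  # page indices start from 0, per the `pages` description
    # Only json_schema is valid for the annotation formats, per the docs above;
    # this mirrors the "Book" example embedded in the table.
    document_annotation_format={
        "type": "json_schema",
        "json_schema": {
            "name": "book",
            "strict": True,
            "schema": {
                "title": "Book",
                "type": "object",
                "additionalProperties": False,
                "properties": {
                    "name": {"title": "Name", "type": "string"},
                    "authors": {"items": {"type": "string"}, "title": "Authors", "type": "array"},
                },
                "required": ["name", "authors"],
            },
        },
    },
)
print(ocr_response.document_annotation)  # response field name assumed
```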
diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md
index c76bc31d..e5185014 100644
--- a/docs/models/outputcontentchunks.md
+++ b/docs/models/outputcontentchunks.md
@@ -27,10 +27,10 @@ value: models.ToolFileChunk = /* values here */
 value: models.DocumentURLChunk = /* values here */
 ```
 
-### `models.ThinkChunk`
+### `models.ConversationThinkChunk`
 
 ```python
-value: models.ThinkChunk = /* values here */
+value: models.ConversationThinkChunk = /* values here */
 ```
 
 ### `models.ToolReferenceChunk`
diff --git a/docs/models/realtimetranscriptioninputaudioappend.md b/docs/models/realtimetranscriptioninputaudioappend.md
new file mode 100644
index 00000000..5ee365eb
--- /dev/null
+++ b/docs/models/realtimetranscriptioninputaudioappend.md
@@ -0,0 +1,9 @@
+# RealtimeTranscriptionInputAudioAppend
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------ | ------------------------ | ------------------------ | ------------------------ |
+| `type` | *Optional[Literal["input_audio.append"]]* | :heavy_minus_sign: | N/A |
+| `audio` | *str* | :heavy_check_mark: | Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes. &#xA; |
\ No newline at end of file
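The realtime transcription models added in this patch (`input_audio.append` above, plus `input_audio.end`, `input_audio.flush`, and `session.update` below) outline a client-to-server message flow. A minimal sketch, assuming the wire format is the plain JSON serialization of these models and leaving the websocket transport abstract:

```python
import base64
import json

def stream_audio(send, pcm_chunks):
    """Hedged sketch of the client->server flow; `send` stands in for the
    actual websocket transport, which this patch does not document."""
    # session.update must be sent before any audio: per the payload docs,
    # format and delay updates are rejected once audio has started.
    send(json.dumps({
        "type": "session.update",
        "session": {
            # The AudioFormat shape here is assumed, not taken from this patch.
            "audio_format": {"encoding": "pcm", "sample_rate": 16000},
            "target_streaming_delay_ms": 500,
        },
    }))
    for chunk in pcm_chunks:
        # Each append carries base64-encoded raw PCM, capped at
        # 262144 decoded bytes per message.
        send(json.dumps({
            "type": "input_audio.append",
            "audio": base64.b64encode(chunk).decode("ascii"),
        }))
    send(json.dumps({"type": "input_audio.flush"}))  # assumed: force pending results
    send(json.dumps({"type": "input_audio.end"}))
```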
diff --git a/docs/models/realtimetranscriptioninputaudioend.md b/docs/models/realtimetranscriptioninputaudioend.md
new file mode 100644
index 00000000..393d208c
--- /dev/null
+++ b/docs/models/realtimetranscriptioninputaudioend.md
@@ -0,0 +1,8 @@
+# RealtimeTranscriptionInputAudioEnd
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- |
+| `type` | *Optional[Literal["input_audio.end"]]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/realtimetranscriptioninputaudioflush.md b/docs/models/realtimetranscriptioninputaudioflush.md
new file mode 100644
index 00000000..367725ba
--- /dev/null
+++ b/docs/models/realtimetranscriptioninputaudioflush.md
@@ -0,0 +1,8 @@
+# RealtimeTranscriptionInputAudioFlush
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- |
+| `type` | *Optional[Literal["input_audio.flush"]]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/realtimetranscriptionsession.md b/docs/models/realtimetranscriptionsession.md
index 94a0a89e..750bd7f7 100644
--- a/docs/models/realtimetranscriptionsession.md
+++ b/docs/models/realtimetranscriptionsession.md
@@ -7,4 +7,5 @@
 | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- |
 | `request_id` | *str* | :heavy_check_mark: | N/A |
 | `model` | *str* | :heavy_check_mark: | N/A |
-| `audio_format` | [models.AudioFormat](../models/audioformat.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
+| `audio_format` | [models.AudioFormat](../models/audioformat.md) | :heavy_check_mark: | N/A |
+| `target_streaming_delay_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/realtimetranscriptionsessionupdatemessage.md b/docs/models/realtimetranscriptionsessionupdatemessage.md
new file mode 100644
index 00000000..2a50ca92
--- /dev/null
+++ b/docs/models/realtimetranscriptionsessionupdatemessage.md
@@ -0,0 +1,9 @@
+# RealtimeTranscriptionSessionUpdateMessage
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------ | ------------------------ | ------------------------ | ------------------------ |
+| `type` | *Optional[Literal["session.update"]]* | :heavy_minus_sign: | N/A |
+| `session` | [models.RealtimeTranscriptionSessionUpdatePayload](../models/realtimetranscriptionsessionupdatepayload.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/getagentrequest.md b/docs/models/realtimetranscriptionsessionupdatepayload.md
similarity index 57%
rename from docs/models/getagentrequest.md
rename to docs/models/realtimetranscriptionsessionupdatepayload.md
index 3f729dff..d6c6547d 100644
--- a/docs/models/getagentrequest.md
+++ 
b/docs/models/realtimetranscriptionsessionupdatepayload.md @@ -1,9 +1,9 @@ -# GetAgentRequest +# RealtimeTranscriptionSessionUpdatePayload ## Fields | Field | Type | Required | Description | | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.GetAgentAgentVersion]](../models/getagentagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `audio_format` | [OptionalNullable[models.AudioFormat]](../models/audioformat.md) | :heavy_minus_sign: | Set before sending audio. Audio format updates are rejected after audio starts. | +| `target_streaming_delay_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | Set before sending audio. Streaming delay updates are rejected after audio starts. | \ No newline at end of file diff --git a/docs/models/referencechunk.md b/docs/models/referencechunk.md index a132ca2f..d847e248 100644 --- a/docs/models/referencechunk.md +++ b/docs/models/referencechunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/referencechunktype.md b/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- a/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git a/docs/models/reprocessdocumentrequest.md b/docs/models/reprocessdocumentrequest.md deleted file mode 100644 index cf3982a8..00000000 --- a/docs/models/reprocessdocumentrequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# ReprocessDocumentRequest - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/response.md b/docs/models/response.md index 3512b7a8..ff679257 100644 --- a/docs/models/response.md +++ b/docs/models/response.md @@ -3,15 +3,15 @@ ## Supported Types -### `models.ClassifierJobOut` +### `models.ClassifierFineTuningJob` ```python -value: models.ClassifierJobOut = /* values here */ +value: models.ClassifierFineTuningJob = /* values here */ ``` -### `models.CompletionJobOut` +### 
`models.CompletionFineTuningJob` ```python -value: models.CompletionJobOut = /* values here */ +value: models.CompletionFineTuningJob = /* values here */ ``` diff --git a/docs/models/retrievemodelrequest.md b/docs/models/retrievemodelv1modelsmodelidgetrequest.md similarity index 94% rename from docs/models/retrievemodelrequest.md rename to docs/models/retrievemodelv1modelsmodelidgetrequest.md index 787c3dd1..f1280f88 100644 --- a/docs/models/retrievemodelrequest.md +++ b/docs/models/retrievemodelv1modelsmodelidgetrequest.md @@ -1,4 +1,4 @@ -# RetrieveModelRequest +# RetrieveModelV1ModelsModelIDGetRequest ## Fields diff --git a/docs/models/messageinputentryrole.md b/docs/models/role.md similarity index 84% rename from docs/models/messageinputentryrole.md rename to docs/models/role.md index f2fdc71d..853c6257 100644 --- a/docs/models/messageinputentryrole.md +++ b/docs/models/role.md @@ -1,4 +1,4 @@ -# MessageInputEntryRole +# Role ## Values diff --git a/docs/models/startfinetuningjobresponse.md b/docs/models/startfinetuningjobresponse.md deleted file mode 100644 index dce84c5a..00000000 --- a/docs/models/startfinetuningjobresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# StartFineTuningJobResponse - -OK - - -## Supported Types - -### `models.ClassifierDetailedJobOut` - -```python -value: models.ClassifierDetailedJobOut = /* values here */ -``` - -### `models.CompletionDetailedJobOut` - -```python -value: models.CompletionDetailedJobOut = /* values here */ -``` - diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md index dfb0cd0b..10bda10f 100644 --- a/docs/models/systemmessage.md +++ b/docs/models/systemmessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/textchunk.md b/docs/models/textchunk.md index d488cb51..df0e61c3 100644 --- a/docs/models/textchunk.md +++ b/docs/models/textchunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.TextChunkType]](../models/textchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["text"]]* | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/textchunktype.md b/docs/models/textchunktype.md deleted file mode 100644 index e2a2ae8b..00000000 --- a/docs/models/textchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# 
TextChunkType
-
-
-## Values
-
-| Name | Value |
-| ------ | ------ |
-| `TEXT` | text |
\ No newline at end of file
diff --git a/docs/models/thinkchunk.md b/docs/models/thinkchunk.md
index 66b2e0cd..70c0369f 100644
--- a/docs/models/thinkchunk.md
+++ b/docs/models/thinkchunk.md
@@ -5,6 +5,6 @@
 
 | Field | Type | Required | Description |
 | ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- |
-| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A |
-| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. |
-| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A |
+| `thinking` | List[[models.ThinkChunkThinking](../models/thinkchunkthinking.md)] | :heavy_check_mark: | N/A |
+| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. |
\ No newline at end of file
diff --git a/docs/models/thinking.md b/docs/models/thinkchunkthinking.md
similarity index 90%
rename from docs/models/thinking.md
rename to docs/models/thinkchunkthinking.md
index c7a0d5c9..dd1ecca1 100644
--- a/docs/models/thinking.md
+++ b/docs/models/thinkchunkthinking.md
@@ -1,4 +1,4 @@
-# Thinking
+# ThinkChunkThinking
 
 
 ## Supported Types
diff --git a/docs/models/thinkchunktype.md b/docs/models/thinkchunktype.md
deleted file mode 100644
index baf6f755..00000000
--- a/docs/models/thinkchunktype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ThinkChunkType
-
-
-## Values
-
-| Name | Value |
-| ---------- | ---------- |
-| `THINKING` | thinking |
\ No newline at end of file
diff --git a/docs/models/toolcallconfirmation.md b/docs/models/toolcallconfirmation.md
new file mode 100644
index 00000000..1812f7d6
--- /dev/null
+++ b/docs/models/toolcallconfirmation.md
@@ -0,0 +1,9 @@
+# ToolCallConfirmation
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ |
+| `tool_call_id` | *str* | :heavy_check_mark: | N/A |
+| `confirmation` | [models.Confirmation](../models/confirmation.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/updateftmodelin.md b/docs/models/toolconfiguration.md
similarity index 54%
rename from docs/models/updateftmodelin.md
rename to docs/models/toolconfiguration.md
index 4e55b1a7..89286a17 100644
--- a/docs/models/updateftmodelin.md
+++ b/docs/models/toolconfiguration.md
@@ -1,9 +1,10 @@
-# UpdateFTModelIn
+# ToolConfiguration
 
 
 ## Fields
 
 | Field | Type | Required | Description |
 | ----------------------- | ----------------------- | ----------------------- | ----------------------- |
-| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
-| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| `exclude` | List[*str*] | :heavy_minus_sign: | N/A |
+| `include` | List[*str*] | :heavy_minus_sign: | N/A |
+| `requires_confirmation` | List[*str*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
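The new `ToolCallConfirmation` and `ToolConfiguration` models above suggest a confirmation loop for agent tools: declare which tools require sign-off, then answer each pending call. A sketch of the shapes only; the tool names, the call id, and the `models.Confirmation` value are assumptions, and the API calls that accept these models are not shown in this hunk:

```python
from mistralai import models

# Shapes only: tool names and ids here are hypothetical.
tool_config = models.ToolConfiguration(
    include=["web_search", "code_interpreter"],
    exclude=["image_generation"],
    requires_confirmation=["code_interpreter"],  # these calls pause for sign-off
)

# When a run pauses on a confirmable call, the client answers with:
confirmation = models.ToolCallConfirmation(
    tool_call_id="call_123",  # hypothetical id from the pending tool call
    confirmation="accepted",  # assumed member of models.Confirmation
)
```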
diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md
index adf88fb1..03316381 100644
--- a/docs/models/toolexecutionentry.md
+++ b/docs/models/toolexecutionentry.md
@@ -3,13 +3,15 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- |
-| `object` | [Optional[models.ToolExecutionEntryObject]](../models/toolexecutionentryobject.md) | :heavy_minus_sign: | N/A |
-| `type` | [Optional[models.ToolExecutionEntryType]](../models/toolexecutionentrytype.md) | :heavy_minus_sign: | N/A |
-| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
-| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
-| `name` | [models.ToolExecutionEntryName](../models/toolexecutionentryname.md) | :heavy_check_mark: | N/A |
-| `arguments` | *str* | :heavy_check_mark: | N/A |
-| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- |
+| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A |
+| `type` | *Optional[Literal["tool.execution"]]* | :heavy_minus_sign: | N/A |
+| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A |
+| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `id` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `name` | [models.ToolExecutionEntryName](../models/toolexecutionentryname.md) | :heavy_check_mark: | N/A |
+| `arguments` | *str* | :heavy_check_mark: | N/A |
+| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/toolexecutionentryobject.md b/docs/models/toolexecutionentryobject.md
deleted file mode 100644
index 0ca79af5..00000000
--- a/docs/models/toolexecutionentryobject.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ToolExecutionEntryObject
-
-
-## Values
-
-| Name | Value |
-| ------- | ------- |
-| `ENTRY` | entry |
\ No newline at end of file
diff --git a/docs/models/toolexecutionentrytype.md b/docs/models/toolexecutionentrytype.md
deleted file mode 100644
index a67629b8..00000000
--- a/docs/models/toolexecutionentrytype.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# ToolExecutionEntryType
-
-
-## Values
-
-| Name | Value |
-| ---------------- | ---------------- |
-| `TOOL_EXECUTION` | tool.execution |
\ No newline at end of file
diff --git 
a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md index c41c7258..189b8a3d 100644 --- a/docs/models/toolexecutionstartedevent.md +++ b/docs/models/toolexecutionstartedevent.md @@ -9,5 +9,7 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunk.md b/docs/models/toolfilechunk.md index a3ffaa2b..d6002175 100644 --- a/docs/models/toolfilechunk.md +++ b/docs/models/toolfilechunk.md @@ -3,10 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `type` | [Optional[models.ToolFileChunkType]](../models/toolfilechunktype.md) | :heavy_minus_sign: | N/A | -| `tool` | [models.ToolFileChunkTool](../models/toolfilechunktool.md) | :heavy_check_mark: | N/A | -| `file_id` | *str* | :heavy_check_mark: | N/A | -| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `type` | *Optional[Literal["tool_file"]]* | :heavy_minus_sign: | N/A | +| `tool` | [models.ToolFileChunkTool](../models/toolfilechunktool.md) | :heavy_check_mark: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunktype.md b/docs/models/toolfilechunktype.md deleted file mode 100644 index 7e99acef..00000000 --- a/docs/models/toolfilechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolFileChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `TOOL_FILE` | tool_file | \ No newline at end of file diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md index fa00d666..7201481e 100644 --- a/docs/models/toolmessage.md +++ b/docs/models/toolmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | | `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | | `tool_call_id` | *OptionalNullable[str]* | 
:heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md index 3020dbc9..49ea4ca7 100644 --- a/docs/models/toolreferencechunk.md +++ b/docs/models/toolreferencechunk.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.ToolReferenceChunkType]](../models/toolreferencechunktype.md) | :heavy_minus_sign: | N/A | -| `tool` | [models.ToolReferenceChunkTool](../models/toolreferencechunktool.md) | :heavy_check_mark: | N/A | -| `title` | *str* | :heavy_check_mark: | N/A | -| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Optional[Literal["tool_reference"]]* | :heavy_minus_sign: | N/A | +| `tool` | [models.ToolReferenceChunkTool](../models/toolreferencechunktool.md) | :heavy_check_mark: | N/A | +| `title` | *str* | :heavy_check_mark: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolreferencechunktype.md b/docs/models/toolreferencechunktype.md deleted file mode 100644 index bc57d277..00000000 --- a/docs/models/toolreferencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolReferenceChunkType - - -## Values - -| Name | Value | -| ---------------- | ---------------- | -| `TOOL_REFERENCE` | tool_reference | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md index 00a599ee..d7672c0e 100644 --- a/docs/models/transcriptionsegmentchunk.md +++ b/docs/models/transcriptionsegmentchunk.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `text` | *str* | :heavy_check_mark: | N/A | -| `start` | *float* | :heavy_check_mark: | N/A | -| `end` | *float* | :heavy_check_mark: | N/A | -| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `type` | 
[Optional[models.TranscriptionSegmentChunkType]](../models/transcriptionsegmentchunktype.md) | :heavy_minus_sign: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `type` | *Optional[Literal["transcription_segment"]]* | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunktype.md b/docs/models/transcriptionsegmentchunktype.md deleted file mode 100644 index 2968fa26..00000000 --- a/docs/models/transcriptionsegmentchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionSegmentChunkType - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `TRANSCRIPTION_SEGMENT` | transcription_segment | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md index e0143a39..1b652a3b 100644 --- a/docs/models/transcriptionstreamsegmentdelta.md +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | | ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- | +| `type` | *Literal["transcription.segment"]* | :heavy_check_mark: | N/A | | `text` | *str* | :heavy_check_mark: | N/A | | `start` | *float* | :heavy_check_mark: | N/A | | `end` | *float* | :heavy_check_mark: | N/A | | `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `type` | *Literal["transcription.segment"]* | :heavy_check_mark: | N/A | | `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdelta.md b/docs/models/transcriptionstreamtextdelta.md index a4062171..77bd0ddc 100644 --- a/docs/models/transcriptionstreamtextdelta.md +++ b/docs/models/transcriptionstreamtextdelta.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------- | ------------------------------------- | ------------------------------------- | ------------------------------------- | -| `text` | *str* | :heavy_check_mark: | N/A | | `type` | *Literal["transcription.text.delta"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | | `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/archiveftmodelout.md b/docs/models/unarchivemodelresponse.md similarity index 96% rename from docs/models/archiveftmodelout.md rename to docs/models/unarchivemodelresponse.md index 98fa7b19..375962a7 100644 --- a/docs/models/archiveftmodelout.md +++ b/docs/models/unarchivemodelresponse.md @@ -1,4 +1,4 @@ -# ArchiveFTModelOut +# UnarchiveModelResponse ## Fields diff --git a/docs/models/updateagentrequest.md b/docs/models/updateagentrequest.md index 358cb71d..d3428d92 100644 --- a/docs/models/updateagentrequest.md 
+++ b/docs/models/updateagentrequest.md
@@ -3,7 +3,15 @@
 
 ## Fields
 
-| Field | Type | Required | Description |
-| ------------------------ | ------------------------ | ------------------------ | ------------------------ |
-| `agent_id` | *str* | :heavy_check_mark: | N/A |
-| `agent_update_request` | [models.AgentUpdateRequest](../models/agentupdaterequest.md) | :heavy_check_mark: | N/A |
\ No newline at end of file
+| Field | Type | Required | Description |
+| ------------------------ | ------------------------ | ------------------------ | ------------------------ |
+| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. |
+| `tools` | List[[models.UpdateAgentRequestTool](../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. |
+| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API |
+| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
+| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A |
+| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A |
+| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A |
+| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
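`UpdateAgentRequest` now carries the update fields directly instead of wrapping a separate `AgentUpdateRequest`. A hedged sketch, assuming the flattened fields surface as keyword arguments on `client.beta.agents.update`; the agent id and values are invented for illustration:

```python
import os
from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

updated = client.beta.agents.update(
    agent_id="ag_0123456789",  # hypothetical id
    instructions="Answer in formal English.",
    completion_args={"temperature": 0.3},
    version_message="Tighten instructions",  # recorded against the new version
)
print(updated.id)
```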
file diff --git a/docs/models/updatelibraryrequest.md b/docs/models/updatelibraryrequest.md index e03883cc..aaffc5a9 100644 --- a/docs/models/updatelibraryrequest.md +++ b/docs/models/updatelibraryrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `library_in_update` | [models.LibraryInUpdate](../models/libraryinupdate.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatemodelrequest.md b/docs/models/updatemodelrequest.md index 5799c63b..56b84c59 100644 --- a/docs/models/updatemodelrequest.md +++ b/docs/models/updatemodelrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | -| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | -| `update_ft_model_in` | [models.UpdateFTModelIn](../models/updateftmodelin.md) | :heavy_check_mark: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatemodelresponse.md b/docs/models/updatemodelresponse.md deleted file mode 100644 index 275ee77f..00000000 --- a/docs/models/updatemodelresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# UpdateModelResponse - -OK - - -## Supported Types - -### `models.ClassifierFTModelOut` - -```python -value: models.ClassifierFTModelOut = /* values here */ -``` - -### `models.CompletionFTModelOut` - -```python -value: models.CompletionFTModelOut = /* values here */ -``` - diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md index 78ed066e..e7a932ed 100644 --- a/docs/models/usermessage.md +++ b/docs/models/usermessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end 
of file diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationresult.md similarity index 98% rename from docs/models/wandbintegrationout.md rename to docs/models/wandbintegrationresult.md index a6f65667..d12bc311 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationresult.md @@ -1,4 +1,4 @@ -# WandbIntegrationOut +# WandbIntegrationResult ## Fields diff --git a/docs/models/websearchpremiumtool.md b/docs/models/websearchpremiumtool.md index 07b8b926..78b736cd 100644 --- a/docs/models/websearchpremiumtool.md +++ b/docs/models/websearchpremiumtool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | -| `type` | *Literal["web_search_premium"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["web_search_premium"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/websearchtool.md b/docs/models/websearchtool.md index da5e7b7b..4ca7333c 100644 --- a/docs/models/websearchtool.md +++ b/docs/models/websearchtool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `type` | *Literal["web_search"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["web_search"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index c1e3866d..c50456df 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -16,7 +16,7 @@ Given a library, list all of the Entity that have access and to what level. 
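Across the README hunks below, `HTTPValidationError` and `SDKError` move from the `models` namespace to a dedicated `errors` namespace. A minimal sketch of catching them under the new names, reusing the `list` call from this section's example; the `from mistralai import errors` import path is an assumption, since the hunks only rename the error-table entries:

```python
import os

from mistralai import errors  # assumed import path for the new namespace
from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    try:
        res = mistral.beta.libraries.accesses.list(
            library_id="d2169833-d8e2-416e-a372-76518d3d99c2"
        )
        print(res)
    except errors.HTTPValidationError as e:
        # 422 validation failures, per the updated error tables
        print(e)
    except errors.SDKError as e:
        # any other 4XX/5XX response
        print(e)
```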
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -26,7 +26,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.list(library_id="9eb628ef-f118-47eb-b3cc-9750c4ca5fb6") + res = mistral.beta.libraries.accesses.list(library_id="d2169833-d8e2-416e-a372-76518d3d99c2") # Handle response print(res) @@ -48,8 +48,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update_or_create @@ -57,7 +57,7 @@ Given a library id, you can create or update the access level of an entity. You ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -67,7 +67,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.update_or_create(library_id="88bb030c-1cb5-4231-ba13-742c56554876", level="Viewer", share_with_uuid="6a736283-c1fa-49b0-9b6d-ea9309c0a766", share_with_type="Workspace") + res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") # Handle response print(res) @@ -93,8 +93,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -102,7 +102,7 @@ Given a library id, you can delete the access level of an entity. An owner canno ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -112,7 +112,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.delete(library_id="fc7ab1cf-e33c-4791-a6e0-95ff1f921c43", share_with_uuid="5818ddff-3568-40f1-a9e4-39d6cb9f5c94", share_with_type="Org") + res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") # Handle response print(res) @@ -137,5 +137,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index cd3ec4c6..8a608370 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -15,7 +15,7 @@ Agents Completion ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -27,8 +27,8 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", @@ -50,7 +50,7 @@ with Mistral( | `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | @@ -69,8 +69,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -90,8 +90,8 @@ with Mistral( res = mistral.agents.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=True, response_format={ "type": "text", @@ -115,7 +115,7 @@ with Mistral( | `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | @@ -134,5 +134,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/batchjobs/README.md b/docs/sdks/batchjobs/README.md index 24316d78..3633fe4e 100644 --- a/docs/sdks/batchjobs/README.md +++ b/docs/sdks/batchjobs/README.md @@ -15,7 +15,7 @@ Get a list of batch jobs for your organization and user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -49,13 +49,13 @@ with Mistral( ### Response -**[models.BatchJobsOut](../../models/batchjobsout.md)** +**[models.ListBatchJobsResponse](../../models/listbatchjobsresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create Create a new batch job; it will be queued for processing. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -73,7 +73,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.create(endpoint="/v1/classifications", model="mistral-small-latest", timeout_hours=24) + res = mistral.batch.jobs.create(endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) # Handle response print(res) @@ -87,7 +87,7 @@ with Mistral( | `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | | | `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the request body for the batch inference in a "body" field. An example of such a file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | | `requests` | List[[models.BatchRequest](../../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | **Example 1:** mistral-small-latest<br/>
**Example 2:** mistral-medium-latest | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | | `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | @@ -95,13 +95,13 @@ with Mistral( ### Response -**[models.BatchJobOut](../../models/batchjobout.md)** +**[models.BatchJob](../../models/batchjob.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -112,7 +112,7 @@ Args: ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -122,7 +122,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.get(job_id="358c80a1-79bd-43f0-8f0e-8186713aa3ba") + res = mistral.batch.jobs.get(job_id="4017dc9f-b629-42f4-9700-8c681b9e7f0f") # Handle response print(res) @@ -139,13 +139,13 @@ with Mistral( ### Response -**[models.BatchJobOut](../../models/batchjobout.md)** +**[models.BatchJob](../../models/batchjob.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## cancel @@ -153,7 +153,7 @@ Request the cancellation of a batch job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -163,7 +163,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.cancel(job_id="393537d7-8b33-4931-a289-7f61f8757eda") + res = mistral.batch.jobs.cancel(job_id="4fb29d1c-535b-4f0a-a1cb-2167f86da569") # Handle response print(res) @@ -179,10 +179,10 @@ with Mistral( ### Response -**[models.BatchJobOut](../../models/batchjobout.md)** +**[models.BatchJob](../../models/batchjob.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/betaagents/README.md b/docs/sdks/betaagents/README.md index 0ef655a3..aaa5110e 100644 --- a/docs/sdks/betaagents/README.md +++ b/docs/sdks/betaagents/README.md @@ -24,7 +24,7 @@ Create a new agent giving it instructions, tools, description. 
The agent is then ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -34,7 +34,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create(model="Mustang", name="", completion_args={ + res = mistral.beta.agents.create(model="LeBaron", name="", completion_args={ "response_format": { "type": "text", }, @@ -47,18 +47,18 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTool](../../models/agentcreationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.CreateAgentRequestTool](../../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -68,8 +68,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list @@ -77,7 +77,7 @@ Retrieve a list of agent entities sorted by creation time. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -116,8 +116,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -125,7 +125,7 @@ Given an agent, retrieve an agent entity with its attributes. The agent_version ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -144,11 +144,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.GetAgentAgentVersion]](../../models/getagentagentversion.md) | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -158,8 +158,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -167,7 +167,7 @@ Update an agent attributes and create a new version. 
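Because `update` snapshots a new agent version, it pairs naturally with `update_version`, documented further down this hunk. A short sketch under that reading; the agent id, instruction text, and version number are illustrative values, not part of the patch:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Updating an agent's attributes creates a new version.
    mistral.beta.agents.update(
        agent_id="ag_0123",  # illustrative id
        instructions="Answer briefly.",
        version_message="tighten instructions",
    )
    # Switch the agent to a specific version (see update_version below).
    res = mistral.beta.agents.update_version(agent_id="ag_0123", version=2)
    print(res)
```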
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -194,7 +194,7 @@ with Mistral( | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | | `agent_id` | *str* | :heavy_check_mark: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentUpdateRequestTool](../../models/agentupdaterequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.UpdateAgentRequestTool](../../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | @@ -213,8 +213,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -222,7 +222,7 @@ Delete an agent entity. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -249,8 +249,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update_version @@ -258,7 +258,7 @@ Switch the version of an agent. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -268,7 +268,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.update_version(agent_id="", version=958693) + res = mistral.beta.agents.update_version(agent_id="", version=157995) # Handle response print(res) @@ -291,8 +291,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list_versions @@ -300,7 +300,7 @@ Retrieve all versions for a specific agent with full agent context. Supports pag ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -334,8 +334,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_version @@ -343,7 +343,7 @@ Get a specific agent version by version number. 
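The regenerated example below now passes `version` as a string. For stable references, the new alias endpoints later in this hunk let a name track a version; a hedged sketch, assuming `list_version_aliases` takes only the agent id as its new request model suggests, with "prod" and the ids as illustrative values:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Point an alias at a reviewed version, then enumerate existing aliases.
    mistral.beta.agents.create_version_alias(
        agent_id="ag_0123", alias="prod", version=3  # illustrative values
    )
    aliases = mistral.beta.agents.list_version_aliases(agent_id="ag_0123")
    print(aliases)
```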
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -353,7 +353,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.get_version(agent_id="", version="") + res = mistral.beta.agents.get_version(agent_id="", version="788393") # Handle response print(res) @@ -376,8 +376,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create_version_alias @@ -385,7 +385,7 @@ Create a new alias or update an existing alias to point to a specific version. A ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -395,7 +395,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=154719) + res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=595141) # Handle response print(res) @@ -419,8 +419,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list_version_aliases @@ -428,7 +428,7 @@ Retrieve all version aliases for a specific agent. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -460,8 +460,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete_version_alias @@ -469,7 +469,7 @@ Delete an existing alias for an agent. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -497,5 +497,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 6907c29d..1bf4aead 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -27,8 +27,8 @@ with Mistral( res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -52,7 +52,7 @@ with Mistral( | `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. 
If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | @@ -72,8 +72,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -93,8 +93,8 @@ with Mistral( res = mistral.chat.stream(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=True, response_format={ "type": "text", @@ -120,7 +120,7 @@ with Mistral( | `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | @@ -140,5 +140,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 41b52081..dc0f4984 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -17,7 +17,7 @@ Moderations ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -27,10 +27,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate(model="Durango", inputs=[ - "", - "", - ]) + res = mistral.classifiers.moderate(model="mistral-moderation-latest", inputs="") # Handle response print(res) @@ -54,8 +51,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## moderate_chat @@ -63,7 +60,7 @@ Chat Moderations ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -75,8 +72,8 @@ with Mistral( res = mistral.classifiers.moderate_chat(inputs=[ { - "content": "", "role": "tool", + "content": "", }, ], model="LeBaron") @@ -101,8 +98,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## classify @@ -146,8 +143,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## classify_chat @@ -165,12 +162,12 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as 
mistral: - res = mistral.classifiers.classify_chat(model="Camry", inputs=[ + res = mistral.classifiers.classify_chat(model="Camry", input=[ { "messages": [ { - "content": "", "role": "system", + "content": "", }, ], }, @@ -186,7 +183,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | N/A | -| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | +| `input` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -197,5 +194,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index c0089f12..e77d329b 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -24,7 +24,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -72,8 +72,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list @@ -81,7 +81,7 @@ Retrieve a list of conversation entities sorted by creation time. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -109,14 +109,14 @@ with Mistral( ### Response -**[List[models.ListConversationsResponse]](../../models/.md)** +**[List[models.AgentsAPIV1ConversationsListResponse]](../../models/.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -124,7 +124,7 @@ Given a conversation_id retrieve a conversation entity with its attributes. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -156,8 +156,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -165,7 +165,7 @@ Delete a conversation given a conversation_id. 
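Note the keyword rename in the `classify_chat` hunk above: the chat payload now goes in `input` rather than `inputs`. A one-call sketch of the updated signature, reusing the generated example's placeholder values:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # `input` replaces the old `inputs` keyword for classify_chat.
    res = mistral.classifiers.classify_chat(
        model="Camry",  # placeholder model name from the generated example
        input=[
            {
                "messages": [
                    {"role": "system", "content": ""},
                ],
            },
        ],
    )
    print(res)
```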
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -192,8 +192,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## append @@ -201,7 +201,7 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -211,7 +211,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append(conversation_id="", inputs=[], stream=False, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.append(conversation_id="", stream=False, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, @@ -227,11 +227,12 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -242,8 +243,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_history @@ -251,7 +252,7 @@ Given a conversation_id retrieve all the entries belonging to that conversation. 
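The `append` table above makes `inputs` optional and adds a `tool_confirmations` list. A sketch of the new call shape; the fields of `models.ToolCallConfirmation` are not shown in this patch, so its construction is left elided:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # `inputs` may now be omitted; pending tool calls can instead be
    # answered via tool_confirmations (items elided: the
    # ToolCallConfirmation fields are not part of this patch).
    res = mistral.beta.conversations.append(
        conversation_id="conv_0123",  # illustrative id
        stream=False,
        store=True,
        handoff_execution="server",
        tool_confirmations=[],
    )
    print(res)
```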
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -283,8 +284,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_messages @@ -292,7 +293,7 @@ Given a conversation_id retrieve all the messages belonging to that conversation ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -324,8 +325,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## restart @@ -333,7 +334,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -343,7 +344,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="", stream=False, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.restart(conversation_id="", from_entry_id="", stream=False, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, @@ -359,8 +360,8 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | @@ -377,8 +378,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## start_stream @@ -386,7 +387,7 @@ Create a new conversation, using a base model or an agent and append entries. 
Co ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -396,7 +397,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs="", stream=True, completion_args={ + res = mistral.beta.conversations.start_stream(inputs=[ + { + "object": "entry", + "type": "function.result", + "tool_call_id": "", + "result": "", + }, + ], stream=True, completion_args={ "response_format": { "type": "text", }, @@ -436,8 +444,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## append_stream @@ -445,7 +453,7 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -455,7 +463,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append_stream(conversation_id="", inputs="", stream=True, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.append_stream(conversation_id="", stream=True, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, @@ -473,11 +481,12 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -488,8 +497,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## restart_stream @@ -497,7 +506,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -507,7 +516,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart_stream(conversation_id="", inputs="", from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.restart_stream(conversation_id="", from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, @@ -525,8 +534,8 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | @@ -543,5 +552,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 97831f86..9c219b67 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -23,7 +23,7 @@ Given a library, lists the document that have been uploaded to that library. 
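A note on what the conversations hunks above mean for callers: `inputs` becomes an optional keyword on `restart`, `append_stream`, and `restart_stream` (and `append_stream` gains `tool_confirmations`), while the documented error classes move from `models.*` to `errors.*`. A minimal sketch of the new call shape; the `errors` import path is an assumption, since the patch shows only the docs tables:

```python
import os

from mistralai.client import Mistral

# Assumption: the relocated error classes live in an `errors` module
# alongside `models`; this patch shows only the docs-table rename, not
# the import path.
from mistralai.client import errors

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    try:
        # `inputs` is optional after this change and omitted here; both
        # IDs are placeholders, not values taken from the patch.
        res = mistral.beta.conversations.restart(
            conversation_id="conv_0123",
            from_entry_id="entry_0456",
        )
        print(res)
    except errors.HTTPValidationError as e:  # previously models.HTTPValidationError
        print("422 validation error:", e)
    except errors.SDKError as e:  # previously models.SDKError
        print("API error:", e)
```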
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -33,7 +33,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.list(library_id="05e1bda5-99b1-4baf-bb03-905d8e094f74", page_size=100, page=0, sort_by="created_at", sort_order="desc") + res = mistral.beta.libraries.documents.list(library_id="5c3ca4cd-62bc-4c71-ad8a-1531ae80d078", page_size=100, page=0, sort_by="created_at", sort_order="desc") # Handle response print(res) @@ -55,14 +55,14 @@ with Mistral( ### Response -**[models.ListDocumentOut](../../models/listdocumentout.md)** +**[models.ListDocumentsResponse](../../models/listdocumentsresponse.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## upload @@ -70,7 +70,7 @@ Given a library, upload a new document to that library. It is queued for process ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -80,7 +80,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.upload(library_id="f973c54e-979a-4464-9d36-8cc31beb21fe", file={ + res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) @@ -100,14 +100,14 @@ with Mistral( ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -115,7 +115,7 @@ Given a library and a document in this library, you can retrieve the metadata of ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -125,7 +125,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.get(library_id="f9902d0a-1ea4-4953-be48-52df6edd302a", document_id="c3e12fd9-e840-46f2-8d4a-79985ed36d24") + res = mistral.beta.libraries.documents.get(library_id="03d908c8-90a1-44fd-bf3a-8490fb7c9a03", document_id="90973aec-0508-4375-8b00-91d732414745") # Handle response print(res) @@ -142,14 +142,14 @@ with Mistral( ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -157,7 +157,7 @@ Given a library and a document in that library, update the name of that document ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = 
mistral.beta.libraries.documents.update(library_id="3b900c67-d2b6-4637-93f2-3eff2c85f8dd", document_id="66f935fd-37ec-441f-bca5-b1129befcbca") + res = mistral.beta.libraries.documents.update(library_id="3ddd8d93-dca5-4a6d-980d-173226c35742", document_id="2a25e44c-b160-40ca-b5c2-b65fb2fcae34") # Handle response print(res) @@ -186,14 +186,14 @@ with Mistral( ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -201,7 +201,7 @@ Given a library and a document in that library, delete that document. The docume ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -211,7 +211,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - mistral.beta.libraries.documents.delete(library_id="c728d742-7845-462b-84ad-2aacbaf1c7cf", document_id="ed3f5797-846a-4abe-8e30-39b2fd2323e0") + mistral.beta.libraries.documents.delete(library_id="005daae9-d42e-407d-82d7-2261c6a1496c", document_id="edc236b0-baff-49a9-884b-4ca36a258da4") # Use the SDK ... @@ -229,8 +229,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## text_content @@ -238,7 +238,7 @@ Given a library and a document in that library, you can retrieve the text conten ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -248,7 +248,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.text_content(library_id="12689dc1-50df-4a0d-8202-2757f7a8c141", document_id="9d4057e9-d112-437c-911e-6ee031389739") + res = mistral.beta.libraries.documents.text_content(library_id="1d177215-3b6b-45ba-9fa9-baf773223bec", document_id="60214c91-2aba-4692-a4e6-a53365de8caf") # Handle response print(res) @@ -271,8 +271,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## status @@ -280,7 +280,7 @@ Given a library and a document in that library, retrieve the processing status o ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -290,7 +290,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.status(library_id="41bb33c4-7e53-453d-bf21-398bb2862772", document_id="416b95cf-19c8-45af-84be-26aaa3ab3666") + res = mistral.beta.libraries.documents.status(library_id="e6906f70-368f-4155-80da-c1718f01bc43", document_id="2c904915-d831-4e9d-a345-8ce405bcef66") # Handle response print(res) @@ -313,8 +313,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| 
models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_signed_url @@ -322,7 +322,7 @@ Given a library and a document in that library, retrieve the signed URL of a spe ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -332,7 +332,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.get_signed_url(library_id="2dbbe172-1374-41be-b03d-a088c733612e", document_id="b5d88764-47f1-4485-9df1-658775428344") + res = mistral.beta.libraries.documents.get_signed_url(library_id="23cf6904-a602-4ee8-9f5b-8efc557c336d", document_id="48598486-df71-4994-acbb-1133c72efa8c") # Handle response print(res) @@ -349,14 +349,14 @@ with Mistral( ### Response -**[str](../../models/.md)** +**[str](../../models/responselibrariesdocumentsgetsignedurlv1.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## extracted_text_signed_url @@ -364,7 +364,7 @@ Given a library and a document in that library, retrieve the signed URL of text ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -374,7 +374,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="46d040ce-ae2e-4891-a54c-cdab6a8f62d8", document_id="3eddbfe2-3fd7-47f5-984b-b378e6950e37") + res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="a6f15de3-1e82-4f95-af82-851499042ef8", document_id="9749d4f9-24e5-4ca2-99a3-a406863f805d") # Handle response print(res) @@ -391,14 +391,14 @@ with Mistral( ### Response -**[str](../../models/.md)** +**[str](../../models/responselibrariesdocumentsgetextractedtextsignedurlv1.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## reprocess @@ -406,7 +406,7 @@ Given a library and a document in that library, reprocess that document, it will ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -416,7 +416,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - mistral.beta.libraries.documents.reprocess(library_id="76d357e4-d891-40c6-9d1e-6d6ce5056ee0", document_id="09798d2b-8f46-46c6-9765-8054a82a4bb2") + mistral.beta.libraries.documents.reprocess(library_id="51b29371-de8f-4ba4-932b-a0bafb3a7f64", document_id="3052422c-49ca-45ac-a918-cadb35d61fd8") # Use the SDK ... 
@@ -434,5 +434,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 0be7ea6d..eecb5c9e 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -14,7 +14,7 @@ Embeddings ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -54,5 +54,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index ae29b7bf..9507326b 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -23,7 +23,7 @@ Please contact us if you need to increase these storage limits. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -53,13 +53,13 @@ with Mistral( ### Response -**[models.UploadFileOut](../../models/uploadfileout.md)** +**[models.CreateFileResponse](../../models/createfileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list @@ -67,7 +67,7 @@ Returns a list of files that belong to the user's organization. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -100,13 +100,13 @@ with Mistral( ### Response -**[models.ListFilesOut](../../models/listfilesout.md)** +**[models.ListFilesResponse](../../models/listfilesresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -114,7 +114,7 @@ Returns information about a specific file. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -124,7 +124,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.retrieve(file_id="654a62d9-b7ee-49ac-835e-af4153e3c9ec") + res = mistral.files.retrieve(file_id="f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6") # Handle response print(res) @@ -140,13 +140,13 @@ with Mistral( ### Response -**[models.RetrieveFileOut](../../models/retrievefileout.md)** +**[models.GetFileResponse](../../models/getfileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -154,7 +154,7 @@ Delete a file. 
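The embeddings diff above touches only the error table, but that still changes what callers should catch. A hedged sketch, assuming the `create` signature (`model` plus a list of `inputs`) is unchanged by this patch:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Signature assumed from the pre-existing embeddings docs; this
    # patch renames only the documented error classes.
    res = mistral.embeddings.create(
        model="mistral-embed",
        inputs=["Embed this sentence.", "As well as this one."],
    )
    print(res)
```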
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -164,7 +164,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.delete(file_id="789c27a4-69de-47c6-b67f-cf6e56ce9f41") + res = mistral.files.delete(file_id="3b6d45eb-e30b-416f-8019-f47e2e93d930") # Handle response print(res) @@ -180,13 +180,13 @@ with Mistral( ### Response -**[models.DeleteFileOut](../../models/deletefileout.md)** +**[models.DeleteFileResponse](../../models/deletefileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## download @@ -194,7 +194,7 @@ Download a file ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -204,7 +204,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.download(file_id="e2ba278e-eac9-4050-ae8e-ec433e124efb") + res = mistral.files.download(file_id="f8919994-a4a1-46b2-8b5b-06335a4300ce") # Handle response print(res) @@ -226,7 +226,7 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_signed_url @@ -234,7 +234,7 @@ Get Signed Url ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -244,7 +244,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="7a0c108d-9e6b-4c47-990d-a20cba50b283", expiry=24) + res = mistral.files.get_signed_url(file_id="06a020ab-355c-49a6-b19d-304b7c01699f", expiry=24) # Handle response print(res) @@ -261,10 +261,10 @@ with Mistral( ### Response -**[models.FileSignedURL](../../models/filesignedurl.md)** +**[models.GetSignedURLResponse](../../models/getsignedurlresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 3c8c59c7..49151bf5 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -15,7 +15,7 @@ FIM completion. 
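The files hunks above are pure response-model renames (`UploadFileOut` → `CreateFileResponse`, `RetrieveFileOut` → `GetFileResponse`, `DeleteFileOut` → `DeleteFileResponse`, `FileSignedURL` → `GetSignedURLResponse`), so call sites change only where they import or isinstance-check those types. A sketch under that reading; the file ID is a placeholder:

```python
import os

from mistralai.client import Mistral
# The examples later in this patch import models from
# `mistralai.client.models`, so the renamed response types should
# resolve from there as well.
from mistralai.client.models import GetSignedURLResponse

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    res = mistral.files.get_signed_url(
        file_id="00000000-0000-0000-0000-000000000000",  # placeholder
        expiry=24,
    )
    # Documented as GetSignedURLResponse after this patch (was FileSignedURL).
    assert isinstance(res, GetSignedURLResponse)
    print(res)
```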
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") + res = mistral.fim.complete(model="codestral-latest", prompt="def", top_p=1, stream=False, suffix="return a+b") # Handle response print(res) @@ -57,8 +57,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -110,5 +110,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/finetuningjobs/README.md b/docs/sdks/finetuningjobs/README.md index fe18feeb..4262b3a9 100644 --- a/docs/sdks/finetuningjobs/README.md +++ b/docs/sdks/finetuningjobs/README.md @@ -16,7 +16,7 @@ Get a list of fine-tuning jobs for your organization and user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -35,29 +35,29 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.ListFineTuningJobsStatus]](../../models/listfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. 
| -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response -**[models.JobsOut](../../models/jobsout.md)** +**[models.ListFineTuningJobsResponse](../../models/listfinetuningjobsresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create @@ -65,7 +65,7 @@ Create a new fine-tuning job, it will be queued for processing. 
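Beneath the re-padded columns of the `list` table above, only two names change: the `status` filter type becomes `JobsAPIRoutesFineTuningGetFineTuningJobsStatus` and the response becomes `ListFineTuningJobsResponse`. Since `status` is passed as a plain string at call sites, existing calls should be unaffected; a sketch using the `"QUEUED"` state that this patch's classifier example also checks for:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Call shape unchanged; only the documented parameter and response
    # model names move in this patch.
    res = mistral.fine_tuning.jobs.list(
        page=0,
        page_size=10,
        status="QUEUED",
        created_by_me=True,
    )
    print(res)  # documented as models.ListFineTuningJobsResponse (was JobsOut)
```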
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -75,7 +75,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Countach", hyperparameters={ + res = mistral.fine_tuning.jobs.create(model="Camaro", hyperparameters={ "learning_rate": 0.0001, }, invalid_sample_skip_percentage=0) @@ -93,23 +93,23 @@ with Mistral( | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegration](../../models/jobinintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `integrations` | List[[models.CreateFineTuningJobRequestIntegration](../../models/createfinetuningjobrequestintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | -| `repositories` | List[[models.JobInRepository](../../models/jobinrepository.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetIn](../../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.CreateFineTuningJobRequestRepository](../../models/createfinetuningjobrequestrepository.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTarget](../../models/classifiertarget.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response -**[models.CreateFineTuningJobResponse](../../models/createfinetuningjobresponse.md)** +**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -117,7 +117,7 @@ Get a fine-tuned job details by its UUID. 
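The `create` hunks above rename the request models (`JobInIntegration` → `CreateFineTuningJobRequestIntegration`, `JobInRepository` → `CreateFineTuningJobRequestRepository`, `ClassifierTargetIn` → `ClassifierTarget`) along with the response. Callers that pass plain dicts, as the generated example does, should be untouched by the request-side renames; a sketch with placeholder values:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Plain-dict arguments sidestep the renamed request classes; the
    # model name and file ID below are placeholders.
    res = mistral.fine_tuning.jobs.create(
        model="open-mistral-7b",
        training_files=[{"file_id": "00000000-0000-0000-0000-000000000000"}],
        hyperparameters={"learning_rate": 0.0001},
        suffix="my-great-model",
        auto_start=False,
    )
    print(res)
```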
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -127,7 +127,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.get(job_id="2855f873-414e-4cf5-a46e-e589e39ee809") + res = mistral.fine_tuning.jobs.get(job_id="c167a961-ffca-4bcf-93ac-6169468dd389") # Handle response print(res) @@ -143,13 +143,13 @@ with Mistral( ### Response -**[models.GetFineTuningJobResponse](../../models/getfinetuningjobresponse.md)** +**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## cancel @@ -157,7 +157,7 @@ Request the cancellation of a fine tuning job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.cancel(job_id="ee7d6f03-fcbb-43ca-8f17-0388c0832eb9") + res = mistral.fine_tuning.jobs.cancel(job_id="6188a2f6-7513-4e0f-89cc-3f8088523a49") # Handle response print(res) @@ -183,13 +183,13 @@ with Mistral( ### Response -**[models.CancelFineTuningJobResponse](../../models/cancelfinetuningjobresponse.md)** +**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## start @@ -197,7 +197,7 @@ Request the start of a validated fine tuning job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -207,7 +207,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.start(job_id="da371429-0ec2-4cea-b9c7-73ce3a1dd76f") + res = mistral.fine_tuning.jobs.start(job_id="56553e4d-0679-471e-b9ac-59a77d671103") # Handle response print(res) @@ -223,10 +223,10 @@ with Mistral( ### Response -**[models.StartFineTuningJobResponse](../../models/startfinetuningjobresponse.md)** +**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index 8835d0ec..7df1ef4e 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -18,7 +18,7 @@ List all libraries that you have created or have been shared with you. 
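`get`, `cancel`, and `start` above keep their signatures; each documented response model simply gains the generated `JobsAPIRoutesFineTuning...` prefix. A polling sketch in the spirit of the classifier example later in this patch; the job ID is a placeholder and the status strings are assumed from that example:

```python
import os
import time

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    job_id = "00000000-0000-0000-0000-000000000000"  # placeholder
    # Same call shape as before; only the documented response model
    # name changes in this patch.
    job = mistral.fine_tuning.jobs.get(job_id=job_id)
    while job.status in ("QUEUED", "STARTED", "RUNNING"):
        time.sleep(10)
        job = mistral.fine_tuning.jobs.get(job_id=job_id)
    print(job.status)
```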
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -43,13 +43,13 @@ with Mistral( ### Response -**[models.ListLibraryOut](../../models/listlibraryout.md)** +**[models.ListLibrariesResponse](../../models/listlibrariesresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create @@ -57,7 +57,7 @@ Create a new Library, you will be marked as the owner and only you will have the ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -85,14 +85,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -100,7 +100,7 @@ Given a library id, details information about that Library. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -110,7 +110,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.get(library_id="44e385d6-783e-4b21-8fae-5181e6817bc4") + res = mistral.beta.libraries.get(library_id="d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f") # Handle response print(res) @@ -126,14 +126,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -141,7 +141,7 @@ Given a library id, deletes it together with all documents that have been upload ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -151,7 +151,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.delete(library_id="441ba08a-3d1f-4700-8d6f-f32eeed49dff") + res = mistral.beta.libraries.delete(library_id="6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a") # Handle response print(res) @@ -167,14 +167,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -182,7 +182,7 @@ Given a library id, you can update the name and description. 
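Every library route above now returns `models.Library` rather than `models.LibraryOut` (and `list` returns `ListLibrariesResponse`); the route signatures are untouched. A sketch assuming `name` remains the create argument, which the elided example body does not show:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    # Assumption: `name` is the create argument and the returned
    # Library (formerly LibraryOut) keeps its `id` field.
    library = mistral.beta.libraries.create(name="my-library")
    fetched = mistral.beta.libraries.get(library_id=library.id)
    print(fetched)
```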
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -192,7 +192,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.update(library_id="27049553-3425-49ce-b965-fcb3a7ab03a3") + res = mistral.beta.libraries.update(library_id="e01880c3-d0b5-4a29-8b1b-abdb8ce917e4") # Handle response print(res) @@ -210,11 +210,11 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 0cbf1bdd..311a2db6 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -19,7 +19,7 @@ List all models available to the user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -50,7 +50,7 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -58,7 +58,7 @@ Retrieve information about a model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -90,8 +90,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -99,7 +99,7 @@ Delete a fine-tuned model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -131,8 +131,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -140,7 +140,7 @@ Update a model name or description. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -168,13 +168,13 @@ with Mistral( ### Response -**[models.UpdateModelResponse](../../models/updatemodelresponse.md)** +**[models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse](../../models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## archive @@ -182,7 +182,7 @@ Archive a fine-tuned model. 
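The models routes above follow the same pattern: stable signatures with response docs moving to the generated names (`UpdateModelResponse` → `JobsAPIRoutesFineTuningUpdateFineTunedModelResponse`; just below, `ArchiveFTModelOut` → `ArchiveModelResponse` and `UnarchiveFTModelOut` → `UnarchiveModelResponse`). A sketch with a placeholder fine-tuned model ID; `model_id` as the parameter name is an assumption, since the patch elides the example bodies:

```python
import os

from mistralai.client import Mistral

with Mistral(
    api_key=os.getenv("MISTRAL_API_KEY", ""),
) as mistral:
    model_id = "ft:open-mistral-7b:my-great-model:xxx"  # placeholder
    # Archive, then un-archive; the returns are now documented as
    # ArchiveModelResponse / UnarchiveModelResponse.
    archived = mistral.models.archive(model_id=model_id)
    print(archived)
    unarchived = mistral.models.unarchive(model_id=model_id)
    print(unarchived)
```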
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -208,13 +208,13 @@ with Mistral( ### Response -**[models.ArchiveFTModelOut](../../models/archiveftmodelout.md)** +**[models.ArchiveModelResponse](../../models/archivemodelresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## unarchive @@ -222,7 +222,7 @@ Un-archive a fine-tuned model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -248,10 +248,10 @@ with Mistral( ### Response -**[models.UnarchiveFTModelOut](../../models/unarchiveftmodelout.md)** +**[models.UnarchiveModelResponse](../../models/unarchivemodelresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 9fd9d6fc..fde2a823 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -14,7 +14,7 @@ OCR ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -25,10 +25,8 @@ with Mistral( ) as mistral: res = mistral.ocr.process(model="CX-9", document={ - "image_url": { - "url": "https://measly-scrap.com", - }, - "type": "image_url", + "type": "document_url", + "document_url": "https://upset-labourer.net/", }, bbox_annotation_format={ "type": "text", }, document_annotation_format={ @@ -42,22 +40,22 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | -| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | -| `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | -| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `document` | [models.DocumentUnion](../../models/documentunion.md) | :heavy_check_mark: | Document to run OCR on | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -67,5 +65,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index 9691b81d..97703c9b 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -15,7 +15,7 @@ Create Transcription ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.audio.transcriptions.complete(model="Model X", diarize=False) + res = mistral.audio.transcriptions.complete(model="voxtral-mini-latest", diarize=False) # Handle response print(res) @@ -36,7 +36,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | voxtral-mini-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | **Example 1:** voxtral-mini-latest
**Example 2:** voxtral-mini-2507 | | `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | | | `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | @@ -55,7 +55,7 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -105,4 +105,4 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/mistral/audio/chat_streaming.py b/examples/mistral/audio/chat_streaming.py index a9ab2323..b418ef57 100755 --- a/examples/mistral/audio/chat_streaming.py +++ b/examples/mistral/audio/chat_streaming.py @@ -2,7 +2,8 @@ import os -from mistralai.client import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File from mistralai.client.models import UserMessage diff --git a/examples/mistral/audio/transcription_async.py b/examples/mistral/audio/transcription_async.py index c8fd9ae6..f04f397e 100644 --- a/examples/mistral/audio/transcription_async.py +++ b/examples/mistral/audio/transcription_async.py @@ -2,7 +2,8 @@ import os import asyncio -from mistralai.client import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py index cbdf3512..4b511c87 100644 --- a/examples/mistral/audio/transcription_diarize_async.py +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -3,7 +3,8 @@ import os import asyncio import pathlib -from mistralai.client import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File fixture_dir = pathlib.Path(__file__).parents[2] / "fixtures" diff --git a/examples/mistral/audio/transcription_stream_async.py b/examples/mistral/audio/transcription_stream_async.py index 6e64dcf7..3055f3de 100644 --- a/examples/mistral/audio/transcription_stream_async.py +++ b/examples/mistral/audio/transcription_stream_async.py @@ -2,7 +2,8 @@ import asyncio import os -from mistralai.client import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/classifier/async_classifier.py b/examples/mistral/classifier/async_classifier.py index 45cc14fa..881f6a69 100644 --- a/examples/mistral/classifier/async_classifier.py +++ b/examples/mistral/classifier/async_classifier.py @@ -2,8 +2,8 @@ from pprint import pprint import asyncio -from mistralai.client import Mistral, TrainingFile, ClassifierTrainingParametersIn -from mistralai.client.models import ClassifierJobOut +from mistralai.client import Mistral +from mistralai.client.models import ClassifierFineTuningJob, ClassifierFineTuningJobDetails, ClassifierTrainingParameters, TrainingFile import os @@ -36,12 +36,12 @@ async def train_classifier(client: Mistral, training_file_ids: list[str]) -> str TrainingFile(file_id=training_file_id) for training_file_id in training_file_ids ], - hyperparameters=ClassifierTrainingParametersIn( + hyperparameters=ClassifierTrainingParameters( 
learning_rate=0.0001, ), auto_start=True, ) - if not isinstance(job, ClassifierJobOut): + if not isinstance(job, ClassifierFineTuningJob): print("Unexpected job type returned") return None @@ -51,6 +51,8 @@ async def train_classifier(client: Mistral, training_file_ids: list[str]) -> str while True: await asyncio.sleep(10) detailed_job = await client.fine_tuning.jobs.get_async(job_id=job.id) + if not isinstance(detailed_job, ClassifierFineTuningJobDetails): + raise Exception(f"Unexpected job type: {type(detailed_job)}") if detailed_job.status not in [ "QUEUED", "STARTED", diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py index 8b4cedd3..d2a1679f 100644 --- a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -1,4 +1,5 @@ -from mistralai.client import Mistral, BatchRequest, UserMessage +from mistralai.client import Mistral +from mistralai.client.models import BatchRequest, UserMessage import os import asyncio diff --git a/packages/azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock index 5cf1d8e1..c795c61c 100644 --- a/packages/azure/.speakeasy/gen.lock +++ b/packages/azure/.speakeasy/gen.lock @@ -3,46 +3,46 @@ id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: docChecksum: 571037b8485712afcef86703debb7f15 docVersion: 1.0.0 - speakeasyVersion: 1.685.0 - generationVersion: 2.794.1 - releaseVersion: 2.0.0a4 - configChecksum: 549cf1eae199d39bf97052462fd8e640 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0-a4.1 + configChecksum: e2523ba89eba35872d05ddb673dd862a repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/azure installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure published: true persistentEdits: - generation_id: b0dbfbbb-4028-4834-9980-a1d2dba52a8d - pristine_commit_hash: 6cab3cf0757d3c7dd58ee1eabec66dd63a8c9a03 - pristine_tree_hash: abf5c6e4b603142b1a6aac936d7c3be574611256 + generation_id: 1812b54a-0aa7-4b43-8c53-d70427856543 + pristine_commit_hash: 28db2945de995b5707dc7f310b5291435aaafcbf + pristine_tree_hash: b01973b36166a61d38fa84cf7dae49b7a74e1402 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 configurableModuleName: 0.2.0 - constsAndDefaults: 1.0.5 - core: 5.23.18 + constsAndDefaults: 1.0.7 + core: 6.0.12 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 examples: 3.0.2 flatRequests: 1.0.1 - globalSecurity: 3.0.4 + globalSecurity: 3.0.5 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.2.0 includes: 3.0.0 methodArguments: 1.0.2 - nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.2 - responseFormat: 1.0.1 - retries: 3.0.3 - sdkHooks: 1.2.0 - serverEvents: 1.0.11 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.1.1 + unions: 3.1.4 trackedFiles: .gitattributes: id: 24139dae6567 @@ -52,6 +52,10 @@ trackedFiles: id: 89aa447020cd last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: 
sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/arguments.md: id: 7ea5e33709a7 last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec @@ -74,8 +78,8 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:a404d37c6605a5524f1f48b418bacf46e86a9a68 - pristine_git_object: 3b0f7270840e257475f4b0f15f27e0c0152818d2 + last_write_checksum: sha1:00453565d70739471a4e1872c93b5b7e66fe6cb6 + pristine_git_object: f8715cd0a335c6dc0fda4b60400f11c4aa8a0a06 docs/models/chatcompletionrequestmessage.md: id: 3f5e170d418c last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 @@ -94,8 +98,8 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: sha1:daca00885f0d0f9863d8420bbee514723084813d - pristine_git_object: f78156a647ec63ca60ff423acbdee2b2404e4e60 + last_write_checksum: sha1:7233a19b12f3204b8e2259a4a09d0d9726609e4e + pristine_git_object: cc82a8c707268084865f86d71be82de5ebf6f821 docs/models/chatcompletionstreamrequestmessage.md: id: 053a98476cd2 last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 @@ -164,10 +168,6 @@ trackedFiles: id: 4b3bd62c0f26 last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 - docs/models/httpvalidationerror.md: - id: a211c095f2ac - last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e - pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/imagedetail.md: id: f8217529b496 last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 @@ -210,8 +210,8 @@ trackedFiles: pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e docs/models/ocrrequest.md: id: 6862a3fc2d0f - last_write_checksum: sha1:9311e2c87f8f4512c35a717d3b063f2861f878d4 - pristine_git_object: 87929e53f8a74823b82ecce56d15f22228134fa6 + last_write_checksum: sha1:eefa8ad80773e00ac297f3cf806704ac6ac3557d + pristine_git_object: 2d26c19fd1cecb234d7fb761dd73cc0a59e622ad docs/models/ocrresponse.md: id: 30042328fb78 last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 @@ -326,8 +326,8 @@ trackedFiles: pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 pylintrc: id: 7ce8b9f946e6 - last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 - pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + last_write_checksum: sha1:8f871a5aac4b10bff724c9d91b8d7496eb1fbdde + pristine_git_object: 0391ac11bdc5526b697b69d047d568a611ce87d0 scripts/prepare_readme.py: id: e0c5957a6035 last_write_checksum: sha1:26b29aad3c23a98912fd881698c976aac55749fe @@ -338,8 +338,8 @@ trackedFiles: pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 src/mistralai/azure/client/__init__.py: id: 5624bda9196d - last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b - pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 src/mistralai/azure/client/_hooks/__init__.py: id: 850c237217cb last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d @@ -354,76 +354,100 @@ trackedFiles: pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 src/mistralai/azure/client/_version.py: id: a77160e60e5d - last_write_checksum: 
sha1:e26eb828e9a240042acc754f38dcf2e581e045aa - pristine_git_object: 4448d2a0fd803f43820378359c921d09eba6f43e + last_write_checksum: sha1:b1d1971d43e8f92bd55bb45653a228fd9de97af3 + pristine_git_object: 4f985cc69c492521664044337e5910f8e5a26b90 src/mistralai/azure/client/basesdk.py: id: 5a585a95ec21 - last_write_checksum: sha1:d7a4a959d7d3ca3cd22d8daf144c3b4d5c0d1210 - pristine_git_object: b0391ac078b4e2a5d9107ed014c1ca939a553c23 + last_write_checksum: sha1:0c2e686aa42d6aeeb103193aa058d6ddff7bcf74 + pristine_git_object: 0d4d9a440e6c7726b6bc7fc6525aa3dc009847eb src/mistralai/azure/client/chat.py: id: c18454e628d7 - last_write_checksum: sha1:cc1ff54b85ce494428ebf22ec01bd1199cd9e2b6 - pristine_git_object: 3348bf47eafb3fcfb2de0e7d512073e947b69554 + last_write_checksum: sha1:884e22b0e313662c67cec7101765d8d7ef0bc48a + pristine_git_object: 1051f9527851894988f7e1689923575cf72a0896 + src/mistralai/azure/client/errors/__init__.py: + id: f377703514d9 + last_write_checksum: sha1:36c516c11f8083c3380a72c1d0f0718a3345f24b + pristine_git_object: 79e2712c2e62121fb6dbaab15ca8487f0e16b07c + src/mistralai/azure/client/errors/httpvalidationerror.py: + id: c3ec0ad923e9 + last_write_checksum: sha1:f45b41c1ad980c5d481158209bf23fa795cc68bc + pristine_git_object: b4f2691e630a095ff09fbbce5e2ea3063592084f + src/mistralai/azure/client/errors/mistralazureerror.py: + id: fae868afae89 + last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c + pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 + src/mistralai/azure/client/errors/no_response_error.py: + id: b838df044e62 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/azure/client/errors/responsevalidationerror.py: + id: 77ac5e93cdda + last_write_checksum: sha1:c1e045dbdda0199bc1d563819c0b38e877d0efef + pristine_git_object: 02397334d2b3bf2516808b69b2548564f650cbe0 + src/mistralai/azure/client/errors/sdkerror.py: + id: dfdd4b1d8928 + last_write_checksum: sha1:edc2baf6feb199e1b1ff1aad681622b44804299d + pristine_git_object: c4f3616cd2720a9b5d2a2c5b2d22a305629ebbe6 src/mistralai/azure/client/httpclient.py: id: 60c81037fbd0 last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d src/mistralai/azure/client/models/__init__.py: id: "335011330e21" - last_write_checksum: sha1:9afe0f0fb324a2b3c60ec98ce78b1ff6f908db39 - pristine_git_object: 51db6a383ddbab2d946b00c41934359a7eb50448 + last_write_checksum: sha1:07054ca95df60a3f03d8ea37a361aa506f94b78b + pristine_git_object: 908dda32cebe894b37dccaaa9b84db174ac93c21 src/mistralai/azure/client/models/assistantmessage.py: id: 353ed9110f97 - last_write_checksum: sha1:e444c76e27b9b745b9238894bdf2b6a40bba6e6e - pristine_git_object: f5793f9455485c576293b44fb548be8bae9c7a65 + last_write_checksum: sha1:973979ac03f86f26ee9a540aaaa8f70a7011daca + pristine_git_object: e9ae6e82c3c758561c8c9663f27b2fd7e38d2911 src/mistralai/azure/client/models/chatcompletionchoice.py: id: 6942c7db5891 last_write_checksum: sha1:817bfda6120a98248322c308629e404081e01279 pristine_git_object: 67b5ba694217f4f3b95589d7f84af6a9bea9802d src/mistralai/azure/client/models/chatcompletionrequest.py: id: 0c711c870184 - last_write_checksum: sha1:fae2a92375aa3e58c258e4497acead859cd3b6dc - pristine_git_object: 921790959880ddf9b9ffce15d881e01f8adefa86 + last_write_checksum: sha1:ffdd11a4945dd805c9a73328749c2f4d9b6f80e6 + pristine_git_object: edd0fdc74a1b81f458d6083e79dc393e488da36a 
src/mistralai/azure/client/models/chatcompletionresponse.py: id: bdfacf065e9e last_write_checksum: sha1:c72fb624e7475a551d37e0b291b64bcf772c402a pristine_git_object: d41f9c6fab670cf7c961f50b1302f9a88cf48162 src/mistralai/azure/client/models/chatcompletionstreamrequest.py: id: da00a7feb4ef - last_write_checksum: sha1:c8c84c818b3b22bfec1e7f5737bbb281088dd3ba - pristine_git_object: be21eed2ecbe8354eb9a4bfa48122b28dada4aaf + last_write_checksum: sha1:8bb36693fed73a50d59687ca8b30a2c443708610 + pristine_git_object: 2edfbed98462eab43f322b9c706721365e410bb9 src/mistralai/azure/client/models/completionchunk.py: id: 28d620f25510 - last_write_checksum: sha1:413545e0521539346bff6e77fdec0c9e383bde17 - pristine_git_object: b94284b2d9c29c25a2f8eaa02828e2a205f4407e + last_write_checksum: sha1:84d1c55ef7bdb438e7f536a604a070799d054281 + pristine_git_object: 0e64bbc8aa0293c9d763db56287f296909260c38 src/mistralai/azure/client/models/completionevent.py: id: a6f00a747933 last_write_checksum: sha1:3d04bfbdaf11c52af5613ed0fd70c8dbc59f6d49 pristine_git_object: c4b272871d9b3ea8443f469d29b0825706c25c00 src/mistralai/azure/client/models/completionresponsestreamchoice.py: id: 3ba5d7ba8a13 - last_write_checksum: sha1:f917300daf4febec7661f2c73bae675600ee0bdd - pristine_git_object: 2a4d053feb84cf2a9675d76ae08c83945b26644c + last_write_checksum: sha1:4de311509c71c8f582b2c767febea89f1acd341a + pristine_git_object: 20a271401ff98d69525947ab929078af83aab1f1 src/mistralai/azure/client/models/contentchunk.py: id: 1f65e4f8f731 - last_write_checksum: sha1:79efbc90c1ae36b74492666125fb3e5ecaa5c27a - pristine_git_object: 0f09f76703efd95fcd96377b8ec6870d58dbf829 + last_write_checksum: sha1:cf11e1f061d3c8af040ebbdba0b25d4177e1cea4 + pristine_git_object: 17efcc7d5825461576cf61257908688cffd23eb7 src/mistralai/azure/client/models/deltamessage.py: id: b7dab1d158de - last_write_checksum: sha1:553fdff5a3aec6909417be3cb390d99421af1693 - pristine_git_object: 2c01feae56c44d256f1e579c15f08e167dcc6481 + last_write_checksum: sha1:190c2809d575244eda5efbb1e00a4ec5811aea29 + pristine_git_object: 567e772fc1b376efaec1a2dfd660bc74a916f8ee src/mistralai/azure/client/models/documenturlchunk.py: id: e56fec6e977f - last_write_checksum: sha1:a43cee08f935933bf715b2f1a82b4c746b591f35 - pristine_git_object: 345bafc2bfe3cc056d746cf8151cf53b68771414 + last_write_checksum: sha1:0313d94f343d46dac7cc3adc392feaf06fa2b2a4 + pristine_git_object: 2dea80056f6752bdaa5d00f391cb6f54371a9d2b src/mistralai/azure/client/models/filechunk.py: id: 150d9f180110 - last_write_checksum: sha1:df1e010006338f6dd37009f2547ab8f0b90b917a - pristine_git_object: 829f03d84c25dd859d514ffa26e570f235e4e75b + last_write_checksum: sha1:6d12d630a5bfd601836f9cb3d63b9eb2f15f880d + pristine_git_object: 6baa0cba81535e157c0f81ae2648362f7bd1adbd src/mistralai/azure/client/models/function.py: id: 6d1e2011a14b - last_write_checksum: sha1:62df160db82853d79907cccff4d0904f6bb9f142 - pristine_git_object: f4edce0fb8563f485d9a63a42439a9b2593a7f40 + last_write_checksum: sha1:b064eca9256966603581d41b5b2c08cd2448224d + pristine_git_object: 055d3657fd98da63b80deb8cd2054e95a0e66a2b src/mistralai/azure/client/models/functioncall.py: id: ced560a1bd57 last_write_checksum: sha1:490cb3a0305994de063e06fa4c77defa911271f3 @@ -432,150 +456,130 @@ trackedFiles: id: 6f09474ebc85 last_write_checksum: sha1:651ceed24416ce8192f70db03cc5cd0db685899f pristine_git_object: 839e0d557a902da6c819210962e38e1df9bda90f - src/mistralai/azure/client/models/httpvalidationerror.py: - id: ca155413681b - last_write_checksum: 
sha1:9dea33d9c74bbdf842ee9d157e4aaa05c36ae34a - pristine_git_object: 40bccddc4d0c0e761d70af713387561101e20b60 src/mistralai/azure/client/models/imagedetail.py: id: de211988043d last_write_checksum: sha1:812f2ec4fc0d8d13db643ed49192384d5a841aa4 pristine_git_object: 2d074cee614e1c49b69ee4073c3aaaa7a5a2c9e2 src/mistralai/azure/client/models/imageurl.py: id: c8882341c798 - last_write_checksum: sha1:443ee3739b3801928b4f3d4256531078fc4045e8 - pristine_git_object: b3c705e3f261ebd59f40e46785577694d80f98bf + last_write_checksum: sha1:8c3c08cc5d33c66b12539270b7edbf157d936f86 + pristine_git_object: bcb4fe43d334752be501d694543250d7e632a9c7 src/mistralai/azure/client/models/imageurlchunk.py: id: b6f0abb574d7 - last_write_checksum: sha1:4651f12f779bc86874c8516f06e39b882e414c92 - pristine_git_object: ee6de50f2add830c19d0b8b030a7c7a2ab65cb11 + last_write_checksum: sha1:417618d9d2aba85386a100dfe818d13342830526 + pristine_git_object: 7213c49846a4107271d017dd695648d98c2efa94 src/mistralai/azure/client/models/jsonschema.py: id: bfd486f4bb18 - last_write_checksum: sha1:ffe7190393086a4301aaffa6854cb3d80b0db92f - pristine_git_object: 5aaa490af350ac1c436dafb3d3c73d56402cac11 - src/mistralai/azure/client/models/mistralazureerror.py: - id: 31ed29254e67 - last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c - pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 + last_write_checksum: sha1:ccb2b53bd2351ec5119d9a7914a1a42c2746a096 + pristine_git_object: 99f2fb8903562465687edfd300d8efd373b92247 src/mistralai/azure/client/models/mistralpromptmode.py: id: d0028b1e4129 last_write_checksum: sha1:46fe1ab8ac2d5867877368a59a4aa5be2fabadeb pristine_git_object: 26e7adbdc4a981c92d51b72542c966b0ba0fb8f8 - src/mistralai/azure/client/models/no_response_error.py: - id: a956d6cd06f0 - last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f - pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 src/mistralai/azure/client/models/ocrimageobject.py: id: 9c9f987d94bb - last_write_checksum: sha1:b86f5187d1c425ddf27ed4815657a7c41d71855c - pristine_git_object: 38e9d3e48df5cee8cdd0cd1d7b6df62182814104 + last_write_checksum: sha1:423effee97a4120a26ba78c2abe7f6adeb5c733d + pristine_git_object: a23515b346a0f9517fec0b2381e1b0c04cb31816 src/mistralai/azure/client/models/ocrpagedimensions.py: id: 7669a25f32b3 last_write_checksum: sha1:60642db6bb61f0e96204fb78d3aa0bd80dd0a7e5 pristine_git_object: 12858da92de99aa6da9d6e148df3ba7ee37496c7 src/mistralai/azure/client/models/ocrpageobject.py: id: eea193b05126 - last_write_checksum: sha1:baada584537b75e2e184738424068e61afe263c7 - pristine_git_object: 5fb821c19fd3cca2c2e149bd058a7ca49d2d002b + last_write_checksum: sha1:b8370ac0611dc3eccf09dddf85d1c39d3a11224b + pristine_git_object: 434c8988f124f93180e6cefa15b3aee067937946 src/mistralai/azure/client/models/ocrrequest.py: id: 365a5b4776a2 - last_write_checksum: sha1:9d3a9bccd341219934470688d3818557231b9b62 - pristine_git_object: fece2713166fc943194b7b38ec9b82db295bba0a + last_write_checksum: sha1:e684da1b6db18cb9c5ce95b9cc58556e05a9ea9b + pristine_git_object: a2cd341593c9db3644076d39352abca6815efc56 src/mistralai/azure/client/models/ocrresponse.py: id: b8cde8c16a4c - last_write_checksum: sha1:e6f08c68f0388919ca7bcbc4f0cb134525053fcd - pristine_git_object: 787289fa995ba6cbf4b2ef3d3c41edb31f656674 + last_write_checksum: sha1:55e81631f6fe57aaf58178460e1c5fc69fa19377 + pristine_git_object: 3dc09fd770a064e69e84519bd0f0c9127ebd8176 src/mistralai/azure/client/models/ocrtableobject.py: id: c2cd51b8789e - 
last_write_checksum: sha1:11052d42f0d91916f038437923ea656bf882032c - pristine_git_object: 3e3c25830a3216f4ef325f5b1056a0c1a267b090 + last_write_checksum: sha1:86a8fd2241cf6a636e81e58484a90bdb7880085e + pristine_git_object: f1de5428a71f9d42cd9f9e764d0bbf88f3aad8cc src/mistralai/azure/client/models/ocrusageinfo.py: id: 5e9118cac468 - last_write_checksum: sha1:6b27c09b5ec447c6ede22aa75190a1e06353349c - pristine_git_object: e2ceba35eb3f6e148389a7fd466dea5c051480a4 + last_write_checksum: sha1:97887b58cfe6ebd9ebd5905c6c7485525d6dc788 + pristine_git_object: f63315d23a1659aee4333b45c4239861aa5220d7 src/mistralai/azure/client/models/prediction.py: id: bd6abfa93083 - last_write_checksum: sha1:87eb3c43fa31b245c13c4708602b300956aa9efb - pristine_git_object: 6b8d6480b9ba1cb6683bdc93c24fb762ccfba146 + last_write_checksum: sha1:07d06d5629af183f999e043744a67868ef779bcc + pristine_git_object: 1fa1d78248628ccdc102ce0631d344150addfd2d src/mistralai/azure/client/models/referencechunk.py: id: c9612f854670 - last_write_checksum: sha1:b96507bcc82939fa4057532ef7e6a440baabd973 - pristine_git_object: e0bcb06be4d4c8d947ee267a9728aeae3a2c52fe + last_write_checksum: sha1:e81e758e00db915e68f58ffa1e03b2c473f64477 + pristine_git_object: f7af9bf9a73e0d782e5e6c6a7866af6fbc3668d8 src/mistralai/azure/client/models/responseformat.py: id: c124e7c316aa - last_write_checksum: sha1:f8c9e581053d1d885196c210a219a3e7aa086610 - pristine_git_object: 39fb03a25efdbc0a92ea91c72038ddd86ee056be + last_write_checksum: sha1:d368a2d4320356b6daab1dd0c62c6c862e902ca0 + pristine_git_object: 20fd2b868506cff278d1d7dc719eddd56ea538b0 src/mistralai/azure/client/models/responseformats.py: id: fef416cefcd4 last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai/azure/client/models/responsevalidationerror.py: - id: afdb9463b434 - last_write_checksum: sha1:26f01befeb347a63928012e7eb36c95a8a392145 - pristine_git_object: cbdffcbba45a988805cdd52d111e77b0ca777dbf - src/mistralai/azure/client/models/sdkerror.py: - id: 4601c7297af7 - last_write_checksum: sha1:b54041f9751e1f2a38dd02a6f8eadb3907fa3df0 - pristine_git_object: a1e9aacaa2fcc839dcb2638788dd7c94298adee7 src/mistralai/azure/client/models/security.py: id: 4a2e4760ec08 last_write_checksum: sha1:0cd2ae54cecd88cfd8d43e92c0d3da7efa48942c pristine_git_object: 9b83ba98336090bed89fbeda40b4a07b212a1106 src/mistralai/azure/client/models/systemmessage.py: id: 8fa0dee9e4e1 - last_write_checksum: sha1:2b52c44b92a098b559ec8b7a80449532169cd317 - pristine_git_object: 38c280c809148e190e329619858718d132da6bc0 + last_write_checksum: sha1:26167db704ece6ef1391d6f474e00f417bff4639 + pristine_git_object: d4bd004476ef653798295fa5df9de68b607f0132 src/mistralai/azure/client/models/systemmessagecontentchunks.py: id: 5918e770869d - last_write_checksum: sha1:55529f2f29ba3087fbf117dbbe64e1dda92b2958 - pristine_git_object: 225f38b712f5f3c7abfd526cc8c0386687814f36 + last_write_checksum: sha1:d1f96498cbb540b91425e70ffa33892ff4d1c8cd + pristine_git_object: 8de71c909eda2ed0166a6be8f8ee029956e5766b src/mistralai/azure/client/models/textchunk.py: id: 9c81c76a6325 - last_write_checksum: sha1:d1c9eaffeb80299f023351dc8d07eb53e49133f2 - pristine_git_object: e513c1434cc7a4766bb9ef039ad8eed2bf0c12ca + last_write_checksum: sha1:28b8f4e030d365e5bf2f2f2720a7919b29616564 + pristine_git_object: 9295148588a143278ff5f48f9142347e35cfdab2 src/mistralai/azure/client/models/thinkchunk.py: id: df6bbd55b3eb - last_write_checksum: 
sha1:ec9af4cb7faa6ba8ed033b37db1d1d5a1406ac3f - pristine_git_object: e769399fe6ba90ddb2503f8fadb4b6cebc7d6f85 + last_write_checksum: sha1:752a81be169fdd7a6afc293cf090b2cd4d2b22c9 + pristine_git_object: 4e881aad3b11d43aecaab922fe55bf7b4076c42f src/mistralai/azure/client/models/tool.py: id: 4075ef72c086 - last_write_checksum: sha1:0c041eaa008ee1851e05bf90e57602c0338f362f - pristine_git_object: 169305bc4c538e88b1e0cf1120aa10e424118880 + last_write_checksum: sha1:4bef6d64b6426fdeff5031557c3c0e37f5c33b9a + pristine_git_object: 87329bdb73526120a3f63d48299114485a7fe038 src/mistralai/azure/client/models/toolcall.py: id: c65e6f79e539 - last_write_checksum: sha1:dd2290e019322e9df73b119e054a1d738eb5f3ba - pristine_git_object: a589b1b38ef4caaba2753f8335228bc16cd68961 + last_write_checksum: sha1:a3b36214b4533b79868630348762206a0e5ca26e + pristine_git_object: ada1ea65136fa58dce55f2857d895ea916bcd41f src/mistralai/azure/client/models/toolchoice.py: id: c25062b5de34 - last_write_checksum: sha1:db82f8d3f811461226cffbeacf2699103a5e0689 - pristine_git_object: 1f623222084f12eaa63f2cea656dc7da10b12a3a + last_write_checksum: sha1:6212c9366eb3b4f4062c86c00d4502dd03bf5ce1 + pristine_git_object: ddb9e1417c880c44a7f0505bfde839570fa3cd4a src/mistralai/azure/client/models/toolchoiceenum.py: id: cc06ba3a8d21 last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 src/mistralai/azure/client/models/toolmessage.py: id: 84ac736fa955 - last_write_checksum: sha1:11841bba4b66179321a35ea1a4d4d3571fa997b7 - pristine_git_object: a73fd6bf8355043f1b40caf7e8b9ded90c1fda0f + last_write_checksum: sha1:e4ed14906985fe74fd76a9adb09125ebc1218a1f + pristine_git_object: 670210de0d05b52ee9dffbbb808a87e67c2d37a9 src/mistralai/azure/client/models/tooltypes.py: id: fa881b046d34 last_write_checksum: sha1:cd28ddc02fff9a5abbb59c82fe9e0dcbdb9b6d2a pristine_git_object: 1cce7446f2772b998208ea1c78c7969e3881d5d0 src/mistralai/azure/client/models/usageinfo.py: id: 3edc9c81b329 - last_write_checksum: sha1:0b2117285b13d846a25c6c59436c4628b9d84a03 - pristine_git_object: 19a6b09fb63a3732719c45f8dfca92cfc2c57353 + last_write_checksum: sha1:0ac2350e4efa1ed3ffd7d33ac91c3ef564d1d773 + pristine_git_object: 0f04c87c97ff3148106408a46618c848b86c4b37 src/mistralai/azure/client/models/usermessage.py: id: 3796508adc07 - last_write_checksum: sha1:f4baa9d8b8f99f715873cea83191baf055c3296a - pristine_git_object: 96439c64a979ac3edf8900d39154d706846a3a95 + last_write_checksum: sha1:8eb35fb07971d74cf2cb0858c037558f52df6aa9 + pristine_git_object: 549b01ca887651a95c5efc8aff3372d32dfdc277 src/mistralai/azure/client/models/validationerror.py: id: f2b84813e2ea last_write_checksum: sha1:f0f9706a5af2ac4f6b234e768fdd492bbdd8a18c pristine_git_object: 817ecf7a56470369ccacd0f5e0bb739656a5f92c src/mistralai/azure/client/ocr.py: id: 5817c10c9297 - last_write_checksum: sha1:24fec22877024154ea417e31ea443b4795c443ba - pristine_git_object: 098e764b6580e35ad0e81242ca601ce821656ee9 + last_write_checksum: sha1:7666ca9f4596cee080952b2f4096bd4176051680 + pristine_git_object: b9270f6a52406d8a9bf02d90c24ae540da6dfb9d src/mistralai/azure/client/py.typed: id: e88369f116d2 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -594,8 +598,8 @@ trackedFiles: pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee src/mistralai/azure/client/utils/__init__.py: id: 26f1a707325b - last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a - pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce + 
last_write_checksum: sha1:3ad22a588864c93bd3a16605f669955b5f3b8053 + pristine_git_object: b488c2df1390b22be3050eee72832a91c76d5385 src/mistralai/azure/client/utils/annotations.py: id: bb1f6c189fdb last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc @@ -604,18 +608,22 @@ trackedFiles: id: 2b7db09ee0ab last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/azure/client/utils/dynamic_imports.py: + id: 0ac779c122d9 + last_write_checksum: sha1:a1940c63feb8eddfd8026de53384baf5056d5dcc + pristine_git_object: 673edf82a97d0fea7295625d3e092ea369a36b79 src/mistralai/azure/client/utils/enums.py: id: ffbdb1917a68 last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 src/mistralai/azure/client/utils/eventstreaming.py: id: bdc37b70360c - last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b - pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + last_write_checksum: sha1:ffa870a25a7e4e2015bfd7a467ccd3aa1de97f0e + pristine_git_object: f2052fc22d9fd6c663ba3dce019fe234ca37108b src/mistralai/azure/client/utils/forms.py: id: 51696122c557 - last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 - pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + last_write_checksum: sha1:0ca31459b99f761fcc6d0557a0a38daac4ad50f4 + pristine_git_object: 1e550bd5c2c35d977ddc10f49d77c23cb12c158d src/mistralai/azure/client/utils/headers.py: id: e42840c8cb13 last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 @@ -638,20 +646,24 @@ trackedFiles: pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c src/mistralai/azure/client/utils/retries.py: id: d50ed6e400b2 - last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 - pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + last_write_checksum: sha1:471372f5c5d1dd5583239c9cf3c75f1b636e5d87 + pristine_git_object: af07d4e941007af4213c5ec9047ef8a2fca04e5e src/mistralai/azure/client/utils/security.py: id: 1d35741ce5f1 - last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 - pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 + last_write_checksum: sha1:435dd8b180cefcd733e635b9fa45512da091d9c0 + pristine_git_object: 17996bd54b8624009802fbbdf30bcb4225b8dfed src/mistralai/azure/client/utils/serializers.py: id: a1f26d73c3ad last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/azure/client/utils/unions.py: + id: 9abcc9913e3f + last_write_checksum: sha1:6e38049f323e0b5fb4bd0e88ab51ec447197ccb0 + pristine_git_object: a227f4e87be22fce682fcae5813b71835199ec5e src/mistralai/azure/client/utils/unmarshal_json_response.py: id: 947f4fc4db62 - last_write_checksum: sha1:99bd357d24d2236e3974630d9bd18bae22610cbc - pristine_git_object: 5317ac87097ccb35628202cf7fc5cb21e186855f + last_write_checksum: sha1:75931131ff498a66a48cfb32dd9d5d61f2c9b4d1 + pristine_git_object: fe0c9b8ecabf8f89e363a050837582df40d67fb4 src/mistralai/azure/client/utils/url.py: id: 4976c88d0e3b last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 diff --git a/packages/azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml index 729cdfcf..0b7262e0 100644 --- a/packages/azure/.speakeasy/gen.yaml +++ b/packages/azure/.speakeasy/gen.yaml @@ -13,8 +13,9 @@ generation: requestResponseComponentNamesFeb2024: true securityFeb2025: true 
sharedErrorComponentsApr2025: true - methodSignaturesApr2024: true sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -22,31 +23,37 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" + versioningStrategy: automatic persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a4 + version: 2.0.0-a4.1 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + main: {} allowedRedefinedBuiltins: - id - object + - input + - dir asyncMode: both authors: - Mistral baseErrorName: MistralAzureError clientServerStatusCodesAsErrors: true - constFieldCasing: upper + constFieldCasing: normal defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. enableCustomCodeRegions: false enumFormat: union fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true responseRequiredSep2024: true flatAdditionalProperties: true flattenGlobalSecurity: true @@ -58,17 +65,17 @@ python: option: openapi paths: callbacks: "" - errors: "" + errors: errors operations: "" shared: "" webhooks: "" inferUnionDiscriminators: true inputModelSuffix: input license: "" - maxMethodParams: 15 + maxMethodParams: 999 methodArguments: infer-optional-args moduleName: mistralai.azure.client - multipartArrayFormat: legacy + multipartArrayFormat: standard outputModelSuffix: output packageManager: uv packageName: mistralai-azure @@ -78,3 +85,4 @@ python: responseFormat: flat sseFlatResponse: false templateVersion: v2 + useAsyncHooks: false diff --git a/packages/azure/docs/models/httpvalidationerror.md b/packages/azure/docs/errors/httpvalidationerror.md similarity index 100% rename from packages/azure/docs/models/httpvalidationerror.md rename to packages/azure/docs/errors/httpvalidationerror.md diff --git a/packages/azure/docs/models/chatcompletionrequest.md b/packages/azure/docs/models/chatcompletionrequest.md index 3b0f7270..f8715cd0 100644 --- a/packages/azure/docs/models/chatcompletionrequest.md +++ b/packages/azure/docs/models/chatcompletionrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/packages/azure/docs/models/chatcompletionstreamrequest.md b/packages/azure/docs/models/chatcompletionstreamrequest.md index f78156a6..cc82a8c7 100644 --- a/packages/azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/azure/docs/models/chatcompletionstreamrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/packages/azure/docs/models/ocrrequest.md b/packages/azure/docs/models/ocrrequest.md index 87929e53..2d26c19f 100644 --- a/packages/azure/docs/models/ocrrequest.md +++ b/packages/azure/docs/models/ocrrequest.md @@ -3,18 +3,18 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | -| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | -| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/azure/pylintrc b/packages/azure/pylintrc index a8fcb932..0391ac11 100644 --- a/packages/azure/pylintrc +++ b/packages/azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.10 # Discover python modules and packages in the file system subtree. recursive=no @@ -459,7 +459,8 @@ disable=raw-checker-failed, consider-using-with, wildcard-import, unused-wildcard-import, - too-many-return-statements + too-many-return-statements, + redefined-builtin # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -641,7 +642,7 @@ additional-builtins= allow-global-unused-variables=yes # List of names allowed to shadow builtins -allowed-redefined-builtins=id,object +allowed-redefined-builtins=id,object,input,dir # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. diff --git a/packages/azure/src/mistralai/azure/client/__init__.py b/packages/azure/src/mistralai/azure/client/__init__.py index dd02e42e..833c68cd 100644 --- a/packages/azure/src/mistralai/azure/client/__init__.py +++ b/packages/azure/src/mistralai/azure/client/__init__.py @@ -9,7 +9,6 @@ ) from .sdk import * from .sdkconfiguration import * -from .models import * VERSION: str = __version__ diff --git a/packages/azure/src/mistralai/azure/client/_version.py b/packages/azure/src/mistralai/azure/client/_version.py index 4448d2a0..4f985cc6 100644 --- a/packages/azure/src/mistralai/azure/client/_version.py +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-azure" -__version__: str = "2.0.0a4" +__version__: str = "2.0.0-a4.1" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a4 2.794.1 1.0.0 mistralai-azure" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0-a4.1 2.841.0 1.0.0 mistralai-azure" try: if __package__ is not None: diff --git a/packages/azure/src/mistralai/azure/client/basesdk.py b/packages/azure/src/mistralai/azure/client/basesdk.py index b0391ac0..0d4d9a44 100644 --- a/packages/azure/src/mistralai/azure/client/basesdk.py +++ b/packages/azure/src/mistralai/azure/client/basesdk.py @@ -2,7 +2,7 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai.azure.client import models, utils +from mistralai.azure.client import errors, utils from mistralai.azure.client._hooks import ( AfterErrorContext, AfterSuccessContext, @@ -12,6 +12,7 @@ RetryConfig, SerializedRequestBody, get_body_content, + run_sync_in_thread, ) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse @@ -264,7 +265,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") 
- raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -285,7 +286,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -315,7 +316,10 @@ async def do_request_async( async def do(): http_res = None try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -329,14 +333,17 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -347,9 +354,10 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None ) + if err is not None: logger.debug("Request Exception", exc_info=True) raise err @@ -357,7 +365,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -369,6 +377,8 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) return http_res diff --git a/packages/azure/src/mistralai/azure/client/chat.py b/packages/azure/src/mistralai/azure/client/chat.py index 3348bf47..1051f952 100644 --- a/packages/azure/src/mistralai/azure/client/chat.py +++ b/packages/azure/src/mistralai/azure/client/chat.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai.azure.client import models, utils +from mistralai.azure.client import errors, models, utils from mistralai.azure.client._hooks import HookContext from mistralai.azure.client.types import OptionalNullable, UNSET from mistralai.azure.client.utils import eventstreaming @@ -179,18 +179,18 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -359,18 +359,18 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -530,17 +530,17 @@ def complete( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API 
error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -700,14 +700,14 @@ async def complete_async( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/azure/src/mistralai/azure/client/errors/__init__.py b/packages/azure/src/mistralai/azure/client/errors/__init__.py new file mode 100644 index 00000000..79e2712c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/__init__.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .mistralazureerror import MistralAzureError +from typing import Any, TYPE_CHECKING + +from mistralai.azure.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralAzureError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py b/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py similarity index 76% rename from packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py rename to packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py index 40bccddc..b4f2691e 100644 --- a/packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py +++ b/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py @@ -1,16 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai.azure.client.models import MistralAzureError +from mistralai.azure.client.errors import MistralAzureError +from mistralai.azure.client.models import validationerror as models_validationerror from mistralai.azure.client.types import BaseModel from typing import List, Optional class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None + detail: Optional[List[models_validationerror.ValidationError]] = None @dataclass(unsafe_hash=True) diff --git a/packages/azure/src/mistralai/azure/client/models/mistralazureerror.py b/packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py similarity index 100% rename from packages/azure/src/mistralai/azure/client/models/mistralazureerror.py rename to packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py diff --git a/packages/azure/src/mistralai/azure/client/models/no_response_error.py b/packages/azure/src/mistralai/azure/client/errors/no_response_error.py similarity index 100% rename from packages/azure/src/mistralai/azure/client/models/no_response_error.py rename to packages/azure/src/mistralai/azure/client/errors/no_response_error.py diff --git a/packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py b/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py similarity index 92% rename from packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py rename to packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py index cbdffcbb..02397334 100644 --- a/packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py +++ b/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai.azure.client.models import MistralAzureError +from mistralai.azure.client.errors import MistralAzureError @dataclass(unsafe_hash=True) diff --git a/packages/azure/src/mistralai/azure/client/models/sdkerror.py b/packages/azure/src/mistralai/azure/client/errors/sdkerror.py similarity index 95% rename from packages/azure/src/mistralai/azure/client/models/sdkerror.py rename to packages/azure/src/mistralai/azure/client/errors/sdkerror.py index a1e9aaca..c4f3616c 100644 --- a/packages/azure/src/mistralai/azure/client/models/sdkerror.py +++ b/packages/azure/src/mistralai/azure/client/errors/sdkerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai.azure.client.models import MistralAzureError +from mistralai.azure.client.errors import MistralAzureError MAX_MESSAGE_LEN = 10_000 diff --git a/packages/azure/src/mistralai/azure/client/models/__init__.py b/packages/azure/src/mistralai/azure/client/models/__init__.py index 51db6a38..908dda32 100644 --- a/packages/azure/src/mistralai/azure/client/models/__init__.py +++ b/packages/azure/src/mistralai/azure/client/models/__init__.py @@ -1,10 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .mistralazureerror import MistralAzureError -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING + +from mistralai.azure.client.utils.dynamic_imports import lazy_getattr, lazy_dir if TYPE_CHECKING: from .assistantmessage import ( @@ -49,7 +47,7 @@ CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, ) - from .contentchunk import ContentChunk, ContentChunkTypedDict + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk from .deltamessage import ( DeltaMessage, DeltaMessageContent, @@ -66,7 +64,6 @@ FunctionCallTypedDict, ) from .functionname import FunctionName, FunctionNameTypedDict - from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData from .imagedetail import ImageDetail from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( @@ -77,7 +74,6 @@ ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode - from .no_response_error import NoResponseError from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict @@ -95,8 +91,6 @@ from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats - from .responsevalidationerror import ResponseValidationError - from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( SystemMessage, @@ -189,8 +183,6 @@ "FunctionName", "FunctionNameTypedDict", "FunctionTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", "ImageDetail", "ImageURL", "ImageURLChunk", @@ -202,9 +194,7 @@ "JSONSchemaTypedDict", "Loc", "LocTypedDict", - "MistralAzureError", "MistralPromptMode", - "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -226,8 +216,6 @@ "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", - "ResponseValidationError", - "SDKError", "Security", "SecurityTypedDict", "SystemMessage", @@ -255,6 +243,7 @@ "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", + "UnknownContentChunk", "UsageInfo", "UsageInfoTypedDict", "UserMessage", @@ -300,6 +289,7 @@ "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", "DeltaMessage": ".deltamessage", "DeltaMessageContent": ".deltamessage", "DeltaMessageContentTypedDict": ".deltamessage", @@ -316,8 +306,6 @@ "FunctionCallTypedDict": ".functioncall", "FunctionName": ".functionname", "FunctionNameTypedDict": ".functionname", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", "ImageDetail": ".imagedetail", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", @@ -328,7 +316,6 @@ "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", - "NoResponseError": ".no_response_error", "OCRImageObject": ".ocrimageobject", "OCRImageObjectTypedDict": ".ocrimageobject", "OCRPageDimensions": ".ocrpagedimensions", @@ -354,8 +341,6 @@ "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", "ResponseFormats": ".responseformats", - 
"ResponseValidationError": ".responsevalidationerror", - "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", "SystemMessage": ".systemmessage", @@ -395,39 +380,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/models/assistantmessage.py b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py index f5793f94..e9ae6e82 100644 --- a/packages/azure/src/mistralai/azure/client/models/assistantmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py @@ -37,7 +37,7 @@ class AssistantMessageTypedDict(TypedDict): class AssistantMessage(BaseModel): - ROLE: Annotated[ + role: Annotated[ Annotated[ Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) ], @@ -53,30 +53,31 @@ class AssistantMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls", "prefix"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py index 92179095..edd0fdc7 100644 --- a/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py +++ 
b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py @@ -170,57 +170,56 @@ class ChatCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "model", - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "model", + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py index be21eed2..2edfbed9 100644 --- a/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py @@ -168,57 +168,56 @@ class ChatCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "model", - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "model", + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in 
optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/completionchunk.py b/packages/azure/src/mistralai/azure/client/models/completionchunk.py index b94284b2..0e64bbc8 100644 --- a/packages/azure/src/mistralai/azure/client/models/completionchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/completionchunk.py @@ -6,7 +6,8 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import List, Optional from typing_extensions import NotRequired, TypedDict @@ -32,3 +33,19 @@ class CompletionChunk(BaseModel): created: Optional[int] = None usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py index 2a4d053f..20a27140 100644 --- a/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py @@ -39,30 +39,14 @@ class CompletionResponseStreamChoice(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/contentchunk.py b/packages/azure/src/mistralai/azure/client/models/contentchunk.py index 0f09f767..17efcc7d 100644 --- a/packages/azure/src/mistralai/azure/client/models/contentchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/contentchunk.py @@ -4,9 +4,12 @@ from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.azure.client.utils import 
get_discriminator -from pydantic import Discriminator, Tag -from typing import Union +from functools import partial +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType @@ -16,11 +19,32 @@ ) +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, +} + + ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ImageURLChunk, TextChunk, ReferenceChunk, UnknownContentChunk], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), ] diff --git a/packages/azure/src/mistralai/azure/client/models/deltamessage.py b/packages/azure/src/mistralai/azure/client/models/deltamessage.py index 2c01feae..567e772f 100644 --- a/packages/azure/src/mistralai/azure/client/models/deltamessage.py +++ b/packages/azure/src/mistralai/azure/client/models/deltamessage.py @@ -40,30 +40,25 @@ class DeltaMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py index 345bafc2..2dea8005 100644 --- a/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py @@ -26,7 +26,7 @@ class DocumentURLChunkTypedDict(TypedDict): class DocumentURLChunk(BaseModel): document_url: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["document_url"]], AfterValidator(validate_const("document_url")), @@ -39,30 +39,31 @@ class DocumentURLChunk(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = 
["type", "document_name"] - nullable_fields = ["document_name"] - null_default_fields = [] - + optional_fields = set(["type", "document_name"]) + nullable_fields = set(["document_name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m + + +try: + DocumentURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/filechunk.py b/packages/azure/src/mistralai/azure/client/models/filechunk.py index 829f03d8..6baa0cba 100644 --- a/packages/azure/src/mistralai/azure/client/models/filechunk.py +++ b/packages/azure/src/mistralai/azure/client/models/filechunk.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -17,7 +18,29 @@ class FileChunkTypedDict(TypedDict): class FileChunk(BaseModel): file_id: str - TYPE: Annotated[ + type: Annotated[ Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], pydantic.Field(alias="type"), ] = "file" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + FileChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/function.py b/packages/azure/src/mistralai/azure/client/models/function.py index f4edce0f..055d3657 100644 --- a/packages/azure/src/mistralai/azure/client/models/function.py +++ b/packages/azure/src/mistralai/azure/client/models/function.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict @@ -21,3 +22,19 @@ class Function(BaseModel): description: Optional[str] = None strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/imageurl.py b/packages/azure/src/mistralai/azure/client/models/imageurl.py index b3c705e3..bcb4fe43 100644 --- a/packages/azure/src/mistralai/azure/client/models/imageurl.py +++ b/packages/azure/src/mistralai/azure/client/models/imageurl.py @@ -25,30 +25,25 @@ class ImageURL(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py index ee6de50f..7213c498 100644 --- a/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py @@ -2,9 +2,10 @@ from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -30,9 +31,31 @@ class ImageURLChunk(BaseModel): image_url: ImageURLUnion - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) ], pydantic.Field(alias="type"), ] = "image_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in 
optional_fields: + m[k] = val + + return m + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/jsonschema.py b/packages/azure/src/mistralai/azure/client/models/jsonschema.py index 5aaa490a..99f2fb89 100644 --- a/packages/azure/src/mistralai/azure/client/models/jsonschema.py +++ b/packages/azure/src/mistralai/azure/client/models/jsonschema.py @@ -32,30 +32,31 @@ class JSONSchema(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py index 38e9d3e4..a23515b3 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py @@ -53,37 +53,34 @@ class OCRImageObject(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["image_base64", "image_annotation"] - nullable_fields = [ - "top_left_x", - "top_left_y", - "bottom_right_x", - "bottom_right_y", - "image_base64", - "image_annotation", - ] - null_default_fields = [] - + optional_fields = set(["image_base64", "image_annotation"]) + nullable_fields = set( + [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py index 5fb821c1..434c8988 100644 --- 
a/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py @@ -62,30 +62,25 @@ class OCRPageObject(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tables", "hyperlinks", "header", "footer"] - nullable_fields = ["header", "footer", "dimensions"] - null_default_fields = [] - + optional_fields = set(["tables", "hyperlinks", "header", "footer"]) + nullable_fields = set(["header", "footer", "dimensions"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrrequest.py b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py index fece2713..a2cd3415 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py @@ -95,52 +95,51 @@ class OCRRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "id", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - "extract_header", - "extract_footer", - ] - nullable_fields = [ - "model", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - ] - null_default_fields = [] - + optional_fields = set( + [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + ) + nullable_fields = set( + [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrresponse.py b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py index 787289fa..3dc09fd7 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrresponse.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py @@ -39,30 +39,25 @@ class OCRResponse(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["document_annotation"] - nullable_fields = ["document_annotation"] - null_default_fields = [] - + optional_fields = set(["document_annotation"]) + nullable_fields = set(["document_annotation"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py index 3e3c2583..f1de5428 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py @@ -35,3 +35,9 @@ class OCRTableObject(BaseModel): format_: Annotated[Format, pydantic.Field(alias="format")] r"""Format of the table""" + + +try: + OCRTableObject.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py index e2ceba35..f63315d2 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py @@ -28,30 +28,25 @@ class OCRUsageInfo(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["doc_size_bytes"] - nullable_fields = ["doc_size_bytes"] - null_default_fields = [] - + optional_fields = set(["doc_size_bytes"]) + nullable_fields = set(["doc_size_bytes"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git 
a/packages/azure/src/mistralai/azure/client/models/prediction.py b/packages/azure/src/mistralai/azure/client/models/prediction.py index 6b8d6480..1fa1d782 100644 --- a/packages/azure/src/mistralai/azure/client/models/prediction.py +++ b/packages/azure/src/mistralai/azure/client/models/prediction.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -19,7 +20,7 @@ class PredictionTypedDict(TypedDict): class Prediction(BaseModel): r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) ], @@ -27,3 +28,25 @@ class Prediction(BaseModel): ] = "content" content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/referencechunk.py b/packages/azure/src/mistralai/azure/client/models/referencechunk.py index e0bcb06b..f7af9bf9 100644 --- a/packages/azure/src/mistralai/azure/client/models/referencechunk.py +++ b/packages/azure/src/mistralai/azure/client/models/referencechunk.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional from typing_extensions import Annotated, TypedDict @@ -17,9 +18,31 @@ class ReferenceChunkTypedDict(TypedDict): class ReferenceChunk(BaseModel): reference_ids: List[int] - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["reference"]], AfterValidator(validate_const("reference")) ], pydantic.Field(alias="type"), ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/responseformat.py b/packages/azure/src/mistralai/azure/client/models/responseformat.py index 39fb03a2..20fd2b86 100644 --- a/packages/azure/src/mistralai/azure/client/models/responseformat.py +++ b/packages/azure/src/mistralai/azure/client/models/responseformat.py @@ -31,30 +31,25 @@ class ResponseFormat(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/systemmessage.py b/packages/azure/src/mistralai/azure/client/models/systemmessage.py index 38c280c8..d4bd0044 100644 --- a/packages/azure/src/mistralai/azure/client/models/systemmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/systemmessage.py @@ -32,7 +32,13 @@ class SystemMessageTypedDict(TypedDict): class SystemMessage(BaseModel): content: SystemMessageContent - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["system"], AfterValidator(validate_const("system"))], pydantic.Field(alias="role"), ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py index 225f38b7..8de71c90 100644 --- 
a/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py +++ b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py @@ -15,5 +15,5 @@ SystemMessageContentChunks = Annotated[ - Union[TextChunk, ThinkChunk], Field(discriminator="TYPE") + Union[TextChunk, ThinkChunk], Field(discriminator="type") ] diff --git a/packages/azure/src/mistralai/azure/client/models/textchunk.py b/packages/azure/src/mistralai/azure/client/models/textchunk.py index e513c143..92951485 100644 --- a/packages/azure/src/mistralai/azure/client/models/textchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/textchunk.py @@ -17,7 +17,13 @@ class TextChunkTypedDict(TypedDict): class TextChunk(BaseModel): text: str - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["text"], AfterValidator(validate_const("text"))], pydantic.Field(alias="type"), ] = "text" + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/thinkchunk.py b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py index e769399f..4e881aad 100644 --- a/packages/azure/src/mistralai/azure/client/models/thinkchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py @@ -3,9 +3,10 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -29,10 +30,32 @@ class ThinkChunkTypedDict(TypedDict): class ThinkChunk(BaseModel): thinking: List[Thinking] - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], pydantic.Field(alias="type"), ] = "thinking" closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/tool.py b/packages/azure/src/mistralai/azure/client/models/tool.py index 169305bc..87329bdb 100644 --- a/packages/azure/src/mistralai/azure/client/models/tool.py +++ b/packages/azure/src/mistralai/azure/client/models/tool.py @@ -3,7 +3,8 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -17,3 +18,19 @@ class Tool(BaseModel): function: Function type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolcall.py b/packages/azure/src/mistralai/azure/client/models/toolcall.py index a589b1b3..ada1ea65 100644 --- a/packages/azure/src/mistralai/azure/client/models/toolcall.py +++ b/packages/azure/src/mistralai/azure/client/models/toolcall.py @@ -3,7 +3,8 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -23,3 +24,19 @@ class ToolCall(BaseModel): type: Optional[ToolTypes] = None index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolchoice.py b/packages/azure/src/mistralai/azure/client/models/toolchoice.py index 1f623222..ddb9e141 100644 --- a/packages/azure/src/mistralai/azure/client/models/toolchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/toolchoice.py @@ -3,7 +3,8 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -23,3 +24,19 @@ class ToolChoice(BaseModel): r"""this restriction of `Function` is used to select a specific function to call""" type: 
Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolmessage.py b/packages/azure/src/mistralai/azure/client/models/toolmessage.py index a73fd6bf..670210de 100644 --- a/packages/azure/src/mistralai/azure/client/models/toolmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/toolmessage.py @@ -35,7 +35,7 @@ class ToolMessageTypedDict(TypedDict): class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], pydantic.Field(alias="role"), ] = "tool" @@ -46,30 +46,31 @@ class ToolMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/usageinfo.py b/packages/azure/src/mistralai/azure/client/models/usageinfo.py index 19a6b09f..0f04c87c 100644 --- a/packages/azure/src/mistralai/azure/client/models/usageinfo.py +++ b/packages/azure/src/mistralai/azure/client/models/usageinfo.py @@ -45,37 +45,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "prompt_audio_seconds", - ] - nullable_fields = ["prompt_audio_seconds"] - null_default_fields = [] - + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k 
in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v diff --git a/packages/azure/src/mistralai/azure/client/models/usermessage.py b/packages/azure/src/mistralai/azure/client/models/usermessage.py index 96439c64..549b01ca 100644 --- a/packages/azure/src/mistralai/azure/client/models/usermessage.py +++ b/packages/azure/src/mistralai/azure/client/models/usermessage.py @@ -27,37 +27,27 @@ class UserMessageTypedDict(TypedDict): class UserMessage(BaseModel): content: Nullable[UserMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["user"], AfterValidator(validate_const("user"))], pydantic.Field(alias="role"), ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["content"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/ocr.py b/packages/azure/src/mistralai/azure/client/ocr.py index 098e764b..b9270f6a 100644 --- a/packages/azure/src/mistralai/azure/client/ocr.py +++ b/packages/azure/src/mistralai/azure/client/ocr.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai.azure.client import models, utils +from mistralai.azure.client import errors, models, utils from mistralai.azure.client._hooks import HookContext from mistralai.azure.client.types import Nullable, OptionalNullable, UNSET from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response @@ -130,17 +130,17 @@ def process( return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def process_async( self, @@ -263,14 +263,14 @@ async def process_async( return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/azure/src/mistralai/azure/client/utils/__init__.py b/packages/azure/src/mistralai/azure/client/utils/__init__.py index 05f26ade..b488c2df 100644 --- a/packages/azure/src/mistralai/azure/client/utils/__init__.py +++ b/packages/azure/src/mistralai/azure/client/utils/__init__.py @@ -1,14 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING, Callable, TypeVar +import asyncio + +from .dynamic_imports import lazy_getattr, lazy_dir + +_T = TypeVar("_T") + + +async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T: + """Run a synchronous function in a thread pool to avoid blocking the event loop.""" + return await asyncio.to_thread(func, *args) + if TYPE_CHECKING: from .annotations import get_discriminator from .datetimes import parse_datetime from .enums import OpenEnumMeta + from .unions import parse_open_union from .headers import get_headers, get_response_headers from .metadata import ( FieldMetadata, @@ -76,6 +85,7 @@ "match_response", "MultipartFormMetadata", "OpenEnumMeta", + "parse_open_union", "PathParamMetadata", "QueryParamMetadata", "remove_suffix", @@ -128,6 +138,7 @@ "match_response": ".values", "MultipartFormMetadata": ".metadata", "OpenEnumMeta": ".enums", + "parse_open_union": ".unions", "PathParamMetadata": ".metadata", "QueryParamMetadata": ".metadata", "remove_suffix": ".url", @@ -157,38 +168,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - return getattr(module, attr_name) - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py b/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py new file mode 100644 index 00000000..673edf82 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). 
+ dynamic_imports: Dict mapping attribute names to relative module paths. + sub_packages: Optional list of subpackage names to lazy-load. + """ + module_name = dynamic_imports.get(attr_name) + if module_name is not None: + try: + module = dynamic_import(package, module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + if sub_packages and attr_name in sub_packages: + return import_module(f".{attr_name}", package) + + raise AttributeError(f"module '{package}' has no attribute '{attr_name}'") + + +def lazy_dir(*, dynamic_imports, sub_packages=None): + """Module-level __dir__ that lists lazily-loadable attributes.""" + lazy_attrs = builtins.list(dynamic_imports.keys()) + if sub_packages: + lazy_attrs.extend(sub_packages) + return builtins.sorted(lazy_attrs) diff --git a/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py b/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py index 0969899b..f2052fc2 100644 --- a/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py +++ b/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py @@ -2,7 +2,9 @@ import re import json +from dataclasses import dataclass, asdict from typing import ( + Any, Callable, Generic, TypeVar, @@ -22,6 +24,7 @@ class EventStream(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] + _closed: bool def __init__( self, @@ -33,17 +36,21 @@ def __init__( self.response = response self.generator = stream_events(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __iter__(self): return self def __next__(self): + if self._closed: + raise StopIteration return next(self.generator) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): + self._closed = True self.response.close() @@ -53,6 +60,7 @@ class EventStreamAsync(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] + _closed: bool def __init__( self, @@ -64,33 +72,45 @@ def __init__( self.response = response self.generator = stream_events_async(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __aiter__(self): return self async def __anext__(self): + if self._closed: + raise StopAsyncIteration return await self.generator.__anext__() async def __aenter__(self): return self async def __aexit__(self, exc_type, exc_val, exc_tb): + self._closed = True await self.response.aclose() +@dataclass class ServerEvent: id: Optional[str] = None event: Optional[str] = None - data: Optional[str] = None + data: Any = None retry: Optional[int] = None MESSAGE_BOUNDARIES = [ b"\r\n\r\n", - b"\n\n", + b"\r\n\r", + b"\r\n\n", + b"\r\r\n", + b"\n\r\n", b"\r\r", + b"\n\r", + b"\n\n", ] +UTF8_BOM = b"\xef\xbb\xbf" + async def stream_events_async( response: httpx.Response, @@ -99,14 +119,10 @@ async def stream_events_async( ) -> AsyncGenerator[T, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -121,15 +137,22 @@ async def stream_events_async( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + await response.aclose() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event @@ -141,14 +164,10 @@ def stream_events( ) -> Generator[T, None, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. - if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -163,22 +182,33 @@ def stream_events( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + response.close() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: block = raw.decode() lines = re.split(r"\r?\n|\r", block) publish = False @@ -189,13 +219,16 @@ def _parse_event( continue delim = line.find(":") - if delim <= 0: + if delim == 0: continue - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] if field == "event": event.event = value @@ -204,37 +237,36 @@ def _parse_event( data += value + "\n" publish = True elif field == "id": - event.id = value publish = True + if "\x00" not in value: + event_id = value elif field == "retry": - event.retry = int(value) if value.isdigit() else None + if value.isdigit(): + event.retry = int(value) publish = True + event.id = event_id + if sentinel and data == f"{sentinel}\n": - return None, True + return None, True, event_id if data: data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data 
= json.loads(data) - except Exception: - pass + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data out = None if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): diff --git a/packages/azure/src/mistralai/azure/client/utils/forms.py b/packages/azure/src/mistralai/azure/client/utils/forms.py index f961e76b..1e550bd5 100644 --- a/packages/azure/src/mistralai/azure/client/utils/forms.py +++ b/packages/azure/src/mistralai/azure/client/utils/forms.py @@ -142,7 +142,7 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files - array_field_name = f_name + "[]" + array_field_name = f_name for file_obj in val: if not _is_set(file_obj): continue @@ -185,7 +185,7 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - array_field_name = f_name + "[]" + array_field_name = f_name form[array_field_name] = values else: form[f_name] = _val_to_string(val) diff --git a/packages/azure/src/mistralai/azure/client/utils/retries.py b/packages/azure/src/mistralai/azure/client/utils/retries.py index 88a91b10..af07d4e9 100644 --- a/packages/azure/src/mistralai/azure/client/utils/retries.py +++ b/packages/azure/src/mistralai/azure/client/utils/retries.py @@ -144,12 +144,7 @@ def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise @@ -193,12 +188,7 @@ async def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise diff --git a/packages/azure/src/mistralai/azure/client/utils/security.py b/packages/azure/src/mistralai/azure/client/utils/security.py index 295a3f40..17996bd5 100644 --- a/packages/azure/src/mistralai/azure/client/utils/security.py +++ b/packages/azure/src/mistralai/azure/client/utils/security.py @@ -135,6 +135,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value elif sub_type == "custom": return else: diff --git a/packages/azure/src/mistralai/azure/client/utils/unions.py b/packages/azure/src/mistralai/azure/client/utils/unions.py new file mode 100644 index 00000000..a227f4e8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/unions.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. + """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py index 5317ac87..fe0c9b8e 100644 --- a/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py +++ b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py @@ -5,7 +5,7 @@ import httpx from .serializers import unmarshal_json -from mistralai.azure.client import models +from mistralai.azure.client import errors T = TypeVar("T") @@ -30,7 +30,7 @@ def unmarshal_json_response( try: return unmarshal_json(body, typ) except Exception as e: - raise models.ResponseValidationError( + raise errors.ResponseValidationError( "Response validation failed", http_res, e, diff --git a/packages/gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock index 8ce6c5ea..517e1a85 100644 --- a/packages/gcp/.speakeasy/gen.lock +++ b/packages/gcp/.speakeasy/gen.lock @@ -3,46 +3,46 @@ id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: docChecksum: bc4a0ba9c38418d84a6a8a76b503977b docVersion: 1.0.0 - speakeasyVersion: 1.685.0 - generationVersion: 2.794.1 - releaseVersion: 2.0.0a4 - configChecksum: 95fb33ae488fa72fb4ba17c6b93551a9 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0-a4.1 + configChecksum: bfe17061a2e5ac54039980ad7a48fd77 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/gcp installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp published: true persistentEdits: - generation_id: 5f09b925-b801-4bf0-bda9-6f9a3212c588 - pristine_commit_hash: 20c7ce96f6a097f98d3367b89a7bea09ba0ded7c - pristine_tree_hash: c30d519719cc0cd17d7bf53ae2c13b1d8b125c5e + generation_id: c7e2e696-b223-4993-a79b-2e6f15242c30 + pristine_commit_hash: 86953bc23bb7fcfc3c2525f79114411bc27e8f75 + pristine_tree_hash: 93675a8857b7519918499101d4a5e30fc7fe2c4a features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 configurableModuleName: 0.2.0 - constsAndDefaults: 1.0.5 - core: 5.23.18 + constsAndDefaults: 1.0.7 + core: 6.0.12 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 examples: 3.0.2 flatRequests: 1.0.1 - globalSecurity: 3.0.4 + globalSecurity: 3.0.5 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.2.0 includes: 3.0.0 methodArguments: 1.0.2 - nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.2 - responseFormat: 1.0.1 - retries: 3.0.3 - sdkHooks: 
1.2.0 - serverEvents: 1.0.11 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.1.1 + unions: 3.1.4 trackedFiles: .gitattributes: id: 24139dae6567 @@ -52,6 +52,10 @@ trackedFiles: id: 89aa447020cd last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/arguments.md: id: 7ea5e33709a7 last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec @@ -74,8 +78,8 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:2bf5152388f18436be4fe1c541b8d423dcae175c - pristine_git_object: 61a25d86e7dc292621f7f6c0f8909137a16b9112 + last_write_checksum: sha1:6374e05aeb66d48137d657acaa89527df2db35c6 + pristine_git_object: 8dbd4a82ad1d7725b9a6ce56daea208ca01b9210 docs/models/chatcompletionrequestmessage.md: id: 3f5e170d418c last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 @@ -94,8 +98,8 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: sha1:f30b2a7353e7406eb30af841a1a211ea5cb30cb0 - pristine_git_object: 3e790e7dc7143b0ae287ad2df14ae7e7a4085e3f + last_write_checksum: sha1:e23cf88a5a9b0c99e68d06a8450b8bfb9aee33a2 + pristine_git_object: db76b6c81a71607f94c212a542fe30e082053a90 docs/models/chatcompletionstreamrequestmessage.md: id: 053a98476cd2 last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 @@ -168,10 +172,6 @@ trackedFiles: id: 4b3bd62c0f26 last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 - docs/models/httpvalidationerror.md: - id: a211c095f2ac - last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e - pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/imagedetail.md: id: f8217529b496 last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 @@ -298,8 +298,8 @@ trackedFiles: pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 pylintrc: id: 7ce8b9f946e6 - last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 - pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + last_write_checksum: sha1:8f871a5aac4b10bff724c9d91b8d7496eb1fbdde + pristine_git_object: 0391ac11bdc5526b697b69d047d568a611ce87d0 scripts/prepare_readme.py: id: e0c5957a6035 last_write_checksum: sha1:eb988bc0e00ed4bb14e9a3572845af14f06c9b42 @@ -310,8 +310,8 @@ trackedFiles: pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 src/mistralai/gcp/client/__init__.py: id: 4f63decd432e - last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b - pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 src/mistralai/gcp/client/_hooks/__init__.py: id: adcb191838d1 last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d @@ -326,84 +326,108 @@ trackedFiles: pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 src/mistralai/gcp/client/_version.py: id: 
f87319e32c7b - last_write_checksum: sha1:8c07e6351bf2df8239b3c02db75ee469dba53394 - pristine_git_object: ba48dac120cadd3f586b38659dc04e50838daa11 + last_write_checksum: sha1:85dd6da1d6503d717e8c9bd6d62278b469d3b464 + pristine_git_object: 204c92a656855ad281e86a74467e71ae1b04639f src/mistralai/gcp/client/basesdk.py: id: 4d594572857b - last_write_checksum: sha1:45ed4b6078e01d52d1dcf4bdc5494b700f1a6cde - pristine_git_object: 6f9f5fd9a2cadc8893d6693c1d40a8114c0fdc2a + last_write_checksum: sha1:d8ef9e2f4fa97d402eb9f5472ceb80fb39693991 + pristine_git_object: b3edcb0aca1882d0cbe4d499cfba9cb5464c5b58 src/mistralai/gcp/client/chat.py: id: 4c41f05f786e - last_write_checksum: sha1:a4d5609f51dee25dfc34f83e1eda2888aa01dda6 - pristine_git_object: 78541248204cbd5b92b6d6d362924fcdada8a948 + last_write_checksum: sha1:60b2697e2ecfb62eebed910007e62ab1df565eec + pristine_git_object: 925d69eda2fdac458045cc12327ca72997e07600 + src/mistralai/gcp/client/errors/__init__.py: + id: c51c8ed21629 + last_write_checksum: sha1:29f08ad600a712ff572843a250839ef92efac19b + pristine_git_object: 00c8ee0031486b5416bb6745397c463e1a5dbba6 + src/mistralai/gcp/client/errors/httpvalidationerror.py: + id: b0e25f1c36bd + last_write_checksum: sha1:c863914ed6704ee6c3ad99a77d8b1e742de069d0 + pristine_git_object: 598068197b9ed7e7756de01325f7967a719e46ea + src/mistralai/gcp/client/errors/mistralgcperror.py: + id: 9a9cad8f5d36 + last_write_checksum: sha1:7267c829a842a94c5b84ac248a1610ce45f3db4e + pristine_git_object: 9de91bf2a4abf8b0d0922eb6062fe2ab817a8aee + src/mistralai/gcp/client/errors/no_response_error.py: + id: 2d3e5fe56122 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/gcp/client/errors/responsevalidationerror.py: + id: 98f7bac284be + last_write_checksum: sha1:1b835d2ce8754b22d5fa269077d7a2eec11d7f29 + pristine_git_object: e8bd83c19b0629bb0ddf7a240e9b8371cb33fff3 + src/mistralai/gcp/client/errors/sdkerror.py: + id: c53aee73c8e1 + last_write_checksum: sha1:080933e9f354b675988a132813f23e55f9e5db74 + pristine_git_object: 6980924626fa5fbf67fb62a30fd23d5883dbe650 src/mistralai/gcp/client/fim.py: id: 13d2d208e0ef - last_write_checksum: sha1:e6226c1720effd314afa7b9a21e5ec2347e5a74f - pristine_git_object: e2acacd58c28fa7ea718240b01a3714f7fc0b8f6 + last_write_checksum: sha1:1027165887446ce0764ad542ca52f61b460c71b8 + pristine_git_object: 4202102ae5218784a10ee93ada5a0643d23a1d0c src/mistralai/gcp/client/httpclient.py: id: a53dd7be6a4c last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d src/mistralai/gcp/client/models/__init__.py: id: d9e976d01972 - last_write_checksum: sha1:f0554ff6b81286615330ffea947e619bc508bf19 - pristine_git_object: fb446c259f4ca1cc97ec64aac197f52b8224a096 + last_write_checksum: sha1:97ddfc7f70abd5e1a0b36be6dce209b69e9d5c73 + pristine_git_object: 575f64040c90152e74954b749ea89bce5a07e02e src/mistralai/gcp/client/models/assistantmessage.py: id: d39c4bdd289e - last_write_checksum: sha1:08fa98315561d5bb2c094bf57e7d66639b86e3ee - pristine_git_object: 7061775b3dbd9be0b978ff2a2cb07e52c01fc80a + last_write_checksum: sha1:c813783bcbeec4e40f12e007d1dde4aed8ec71cf + pristine_git_object: 702ac4708abb95fc18d138500b8353715c2dbc98 src/mistralai/gcp/client/models/chatcompletionchoice.py: id: 8e65b56f3e6d last_write_checksum: sha1:e6d1382e9f880b866130d900fd866997aaf80e45 pristine_git_object: ae5a2fbf38afbd86233dcaa8aa1c8441f5ed9eba 
src/mistralai/gcp/client/models/chatcompletionrequest.py: id: 4694a31c0003 - last_write_checksum: sha1:edb744ec2baca1f9ba6574662fffb36fb7d3faab - pristine_git_object: 1bc039221910bf88396c96affe735c8ac822920b + last_write_checksum: sha1:80fcbbcde773c22c93cf2db63beef2cfe3777497 + pristine_git_object: 8229c5bb13ded84039f3d8ddb95ac0a9c184e1bd src/mistralai/gcp/client/models/chatcompletionresponse.py: id: dd9e4796fca9 last_write_checksum: sha1:76d7257583389ff5021e320a8f9a45a6deb07c7c pristine_git_object: 317c4d84e378c14294d58c5aefd8c55ffe28754a src/mistralai/gcp/client/models/chatcompletionstreamrequest.py: id: 7294862af8ea - last_write_checksum: sha1:75d5bfcc204339b152dc78e33ac449c3aa9b5432 - pristine_git_object: 0a5a0021a4862e7b92a5c31679bf42bfa704d15b + last_write_checksum: sha1:899210f881bdbe0a0d94e29fe7044fabbccc578c + pristine_git_object: 3c228d2e7edf08c36f310e190a8dedc7b4958459 src/mistralai/gcp/client/models/completionchunk.py: id: 6b9ed8c30877 - last_write_checksum: sha1:4afc07c1824d81640f52a5c8bf89fde8893269b9 - pristine_git_object: 9e54cb6dfaccf7f815b40be585e11585cb5fef78 + last_write_checksum: sha1:f1f091e94e3c1c1aefd3c3bb60c8de8236ab0ead + pristine_git_object: a0b1ae2fa3109a2c2b76bbc483b691d88dc9a15c src/mistralai/gcp/client/models/completionevent.py: id: 3f55c4b8fc75 last_write_checksum: sha1:66665d921fd27df6ef0efce996a5446e49b989d8 pristine_git_object: bb1550093ce9adcb9bcd0548b69796e82f4f260b src/mistralai/gcp/client/models/completionresponsestreamchoice.py: id: ad9b98ca7e1c - last_write_checksum: sha1:04d195584fe4ea16544685e9989e5ae35205179a - pristine_git_object: 6f306721fbe47780c778833b80e97ab5d25d8367 + last_write_checksum: sha1:c4f9d733461bdb9a0d6c96e82212de7dddc04ffe + pristine_git_object: e58d4c88009ed3696d2a3a57f3796d8fb067019d src/mistralai/gcp/client/models/contentchunk.py: id: 8714d3bf2698 - last_write_checksum: sha1:347f43b4d7dcab18e09e6c3323f745a25ecfb04c - pristine_git_object: 1cd9e502ab7d4860daa79f907beafa71da086ab3 + last_write_checksum: sha1:acab1b53b1d324544c6aa6c4126a3fb5265278d2 + pristine_git_object: 18d481505e17d2125e380d796b0c406b0e66d601 src/mistralai/gcp/client/models/deltamessage.py: id: 404fc85f1a4c - last_write_checksum: sha1:3375624531d12279d225fb07a68e0396483b962f - pristine_git_object: 96923518438137cb729a69149b5b99be49836ad7 + last_write_checksum: sha1:982c2d15a570c7f4d5e1c3b012db46ea3bac609b + pristine_git_object: 63e6a7f3e50c138f235f5a36277aa8668f85cef1 src/mistralai/gcp/client/models/fimcompletionrequest.py: id: 5b79e2595d31 - last_write_checksum: sha1:cc4fa68c60a6a500a9887e47dd2e9220327c6226 - pristine_git_object: f37bbcc3cab020224531da898dd99cc175d49cd9 + last_write_checksum: sha1:80a2e3d5e10c240869cd96c41936d714cf8bf801 + pristine_git_object: e460f76c59315c22c75194936f1f3b232331f83c src/mistralai/gcp/client/models/fimcompletionresponse.py: id: 402f602d29b8 last_write_checksum: sha1:cfe26848c7b14d6e374b7944d7ad44df822990b0 pristine_git_object: 5b80da3f03e4e99dfca971a53af1cf6472c889bb src/mistralai/gcp/client/models/fimcompletionstreamrequest.py: id: 31190cf25070 - last_write_checksum: sha1:720f0a039a62cb508d513475a0e4bad45a9aa03c - pristine_git_object: 8e6102612998bde70d830bb0b8ee3a5e2a4dd01e + last_write_checksum: sha1:a95ab8c20b2fdff48102f08258a556af9f382ffa + pristine_git_object: fffc305499e578f77e42fb7992b59e933ae0ae7c src/mistralai/gcp/client/models/function.py: id: 2285a899b32e - last_write_checksum: sha1:a69ad9c8cd723e78a3949deefe43bcbf57426916 - pristine_git_object: 28577eff06d052aeb58c2795dd0a92ae4f2e7552 + last_write_checksum: 
sha1:6439f7f781174ae56b2b02ccbb4d02b08d8d5a03 + pristine_git_object: 439e831355444e0f9e82d23636651201f0db4bfc src/mistralai/gcp/client/models/functioncall.py: id: 17bb51f08e5f last_write_checksum: sha1:b5fe2f061ea5f47057ee50011babc80de27e0ee6 @@ -412,114 +436,94 @@ trackedFiles: id: 313a6001145f last_write_checksum: sha1:fe1eefaed314efa788bd15beb63bf6b81abb307e pristine_git_object: 585b9e39762e49356823e211ad86f701bca389b8 - src/mistralai/gcp/client/models/httpvalidationerror.py: - id: bdb67f678798 - last_write_checksum: sha1:58b6b7a2b2f8e4f66fc14c38540a26cfd2541a1e - pristine_git_object: 57df72607adc980b061d092f77140c6dbd36ecec src/mistralai/gcp/client/models/imagedetail.py: id: a28b2f3e2cb5 last_write_checksum: sha1:a4874529961952019eaa86a2fa0989626f537a4c pristine_git_object: 68ed76080716eb1424b13f182479f57e51a4fabf src/mistralai/gcp/client/models/imageurl.py: id: 4e330f3eae74 - last_write_checksum: sha1:3c5d70c0698b1b4b9c99087241227bab3dc0cdbf - pristine_git_object: d4f298f12d8095590cded5714091596b505c59b1 + last_write_checksum: sha1:6c0bee7d7c765fb2611131c7d270041671b428b8 + pristine_git_object: 903d0a1a45eeb7c5e8cde80f624b6e039de1f4cc src/mistralai/gcp/client/models/imageurlchunk.py: id: e68a4a393e9b - last_write_checksum: sha1:2eb2c8a205e5f8b320e2f597075cad9e5e27475b - pristine_git_object: fc5284c102c17a33c1ba6029c87515d509cd014b + last_write_checksum: sha1:eae1d0e69a90b2f7513492e4cd0ed68d647f0b5d + pristine_git_object: 4bec0eec882c1eeee8a80f663ff7d686ca677ea0 src/mistralai/gcp/client/models/jsonschema.py: id: 39c6e7d412a0 - last_write_checksum: sha1:29ba87457959588ff7d8188ae2382fb88740151d - pristine_git_object: 443c429dd1461d7a6817335626cd585577c5bffe - src/mistralai/gcp/client/models/mistralgcperror.py: - id: 278d296220ff - last_write_checksum: sha1:7267c829a842a94c5b84ac248a1610ce45f3db4e - pristine_git_object: 9de91bf2a4abf8b0d0922eb6062fe2ab817a8aee + last_write_checksum: sha1:19b34a5e3f5c00d1a1b96f91a6e02f5ad12240c7 + pristine_git_object: 684ac09f0460bef1f26bf0030b79bbc7141ab99b src/mistralai/gcp/client/models/mistralpromptmode.py: id: 8be4a4a683e4 last_write_checksum: sha1:c958567e95490abf3941fde69be69733e8afb90e pristine_git_object: c765e4f1a0b86735255771231377f13d62f3d7a6 - src/mistralai/gcp/client/models/no_response_error.py: - id: 2a7fa173594b - last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f - pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 src/mistralai/gcp/client/models/prediction.py: id: 7a5463285bc8 - last_write_checksum: sha1:1d1e81082d1c2bfd613f0bc00f7173995ad67c0c - pristine_git_object: f53579edc665dd7fc1cc2497b0cd05b69e541cd8 + last_write_checksum: sha1:67c4a9b06d3e98552409a26960e0afd64f829b53 + pristine_git_object: 2e325289fd6c2a987ad270fd808f7b9a3f423440 src/mistralai/gcp/client/models/referencechunk.py: id: 523e477f8725 - last_write_checksum: sha1:d29c5fc1d8b6850fdeb3abc7f83185de92571b23 - pristine_git_object: 274ea7f7b142714d96040428fe7b87eeb48432cb + last_write_checksum: sha1:aade1dc05c2a2672630eb17626e4f49367d6bfe6 + pristine_git_object: 261c4755641093a38f97b17dce3a387623e69ead src/mistralai/gcp/client/models/responseformat.py: id: 06774bb65b42 - last_write_checksum: sha1:a52a60dc45c0b0939b99754d6c0c603ef2f737d3 - pristine_git_object: 34ae6b039a6c83c603fc6d47f6b2f233ec6c817a + last_write_checksum: sha1:7e64de46ef34718003cf0d198868a193f2122178 + pristine_git_object: f3aa9930e0f8a009dac628300d66c6209a538031 src/mistralai/gcp/client/models/responseformats.py: id: 18112ad0f6db last_write_checksum: 
sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai/gcp/client/models/responsevalidationerror.py: - id: b90c1c09ac00 - last_write_checksum: sha1:e4321c1141ba7b1f6a8c217124e02ea0c70d9ad1 - pristine_git_object: 0e86ea6cb79fd4598d527dfef403ba66d435d3bb - src/mistralai/gcp/client/models/sdkerror.py: - id: a7cf4fa8974b - last_write_checksum: sha1:a3b60234deceb7fbcb57926c265e02e9fefc0835 - pristine_git_object: 00bc1d99353e7e2415d92c3e906c2c09712e5a64 src/mistralai/gcp/client/models/security.py: id: 7e13bda8273b last_write_checksum: sha1:7086e929823d4eefe80cc279b605adfc8bbb08aa pristine_git_object: 10a469b54d5e03873fb7d7d98627f2376c93d484 src/mistralai/gcp/client/models/systemmessage.py: id: 6537664d2d1b - last_write_checksum: sha1:e7f8dc73154c6985fcdbb77259df9bbc4745f976 - pristine_git_object: a7d695a7791eb5e97cd8f74e81c475c78e4b1a67 + last_write_checksum: sha1:779cb07cfd63ebe9eec496177cf1a8f5c077e417 + pristine_git_object: b3795c4bf4e97853979e0042cf4bd151d60ef974 src/mistralai/gcp/client/models/systemmessagecontentchunks.py: id: e120a6469c89 - last_write_checksum: sha1:55529f2f29ba3087fbf117dbbe64e1dda92b2958 - pristine_git_object: 225f38b712f5f3c7abfd526cc8c0386687814f36 + last_write_checksum: sha1:d1f96498cbb540b91425e70ffa33892ff4d1c8cd + pristine_git_object: 8de71c909eda2ed0166a6be8f8ee029956e5766b src/mistralai/gcp/client/models/textchunk.py: id: a134f120d4dc - last_write_checksum: sha1:9f46381e01f235560017ea80fbc85210eb625a99 - pristine_git_object: 77576c9fd87f0861bf6a3496aeae7e8bb8dc986a + last_write_checksum: sha1:1ccc7d232136d6278d670542d192f36f46862df1 + pristine_git_object: 690322725c0f852a005d08c5b722c41709868b22 src/mistralai/gcp/client/models/thinkchunk.py: id: 59a1d1ef2020 - last_write_checksum: sha1:9fcccb19d87bc41f771cae710eeb8f28c229070d - pristine_git_object: b65fffb21d5cb060acaa648a70e337a43595cd32 + last_write_checksum: sha1:066eeb10de301264e601a9ec64d21e1cc13b0c20 + pristine_git_object: 33ec83949499d99a28c55bb20429ab948bb5b1e8 src/mistralai/gcp/client/models/tool.py: id: 4b27d45e56ad - last_write_checksum: sha1:6d139575b740ea1f9f68a73b7bc2c95c30a10345 - pristine_git_object: d09c68542f2cb1f3bae0ffc7a7b163ad08a8e973 + last_write_checksum: sha1:cb0d879a55218fd7753bdd005be8a155982feb8f + pristine_git_object: 670aa81f8767e7c079105cf5995225168b4d6eb6 src/mistralai/gcp/client/models/toolcall.py: id: e6c25869a579 - last_write_checksum: sha1:5acf0eca8b1f4c459c6d8cadbbbd90605201ddc0 - pristine_git_object: a1edf3370426957980ff212367d56909ea8fa548 + last_write_checksum: sha1:f88e69a8e352025ca4b6897f6c16e1f7e4cd7264 + pristine_git_object: 3ea8e283c8f695bcc1fbc734b0074d37c2efeac8 src/mistralai/gcp/client/models/toolchoice.py: id: cb13a9f64c92 - last_write_checksum: sha1:3ad6b48b24b39609e86229193ad18d84b1b3c818 - pristine_git_object: de3828dac8bc23e32b9f9434adccc770b5ce1212 + last_write_checksum: sha1:71be72b1aae19aef1f8a461c89b71ad6daa009b7 + pristine_git_object: 6e795fd72792f740c8aa5b4da7d1f516018f2c2e src/mistralai/gcp/client/models/toolchoiceenum.py: id: d62e9c92d93c last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 src/mistralai/gcp/client/models/toolmessage.py: id: b3774786c2e9 - last_write_checksum: sha1:ef21eb555f41ec70010dbcea1a155af988936061 - pristine_git_object: 65b1d9d62d37361a06b3fd3ee1790eb3a976a94f + last_write_checksum: sha1:3d414da8132467d1472ebe485802ffc78eb6f7e4 + pristine_git_object: 
ce160391f37ce3568daf2877f8dc1aa0f3694821 src/mistralai/gcp/client/models/tooltypes.py: id: 5926c64f5229 last_write_checksum: sha1:ffd576511eed9f823c3d67df9fc5574d8d53c54b pristine_git_object: fd1aa13d7b8c5d9bdb0922e04b8bd653ff843f60 src/mistralai/gcp/client/models/usageinfo.py: id: 3aab1af66cff - last_write_checksum: sha1:47c6311bc1db47849a72c8e1bcc64dac9cec637e - pristine_git_object: 9b7207b10ea9d46d8216c104c45be1a52fb093d9 + last_write_checksum: sha1:c0c949ac48ed35efe1e8fbf820b8e390edd9c3ce + pristine_git_object: cb6feb6e8d173d39b828d8f5b38af75173b4f7f2 src/mistralai/gcp/client/models/usermessage.py: id: 9cfa7260463e - last_write_checksum: sha1:580acf868a3d180eef34b2af9c2d20f78e4fb693 - pristine_git_object: c083e16d4aa536beec9f9e1151ebbe8c1797798c + last_write_checksum: sha1:780984241b84a7dfe1f6ad6eccace1204bfec8bd + pristine_git_object: e237e900421a9e65fd15aede29ade0e510b189f6 src/mistralai/gcp/client/models/validationerror.py: id: 6b4f4910ea9c last_write_checksum: sha1:2792fd656f55519902f37670fb9fb3b43b4aa016 @@ -542,8 +546,8 @@ trackedFiles: pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee src/mistralai/gcp/client/utils/__init__.py: id: a30c8ff6dcff - last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a - pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce + last_write_checksum: sha1:3ad22a588864c93bd3a16605f669955b5f3b8053 + pristine_git_object: b488c2df1390b22be3050eee72832a91c76d5385 src/mistralai/gcp/client/utils/annotations.py: id: 9b2cd4ffc6e9 last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc @@ -552,18 +556,22 @@ trackedFiles: id: dd1f0f91ea9d last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/gcp/client/utils/dynamic_imports.py: + id: 0091051cb000 + last_write_checksum: sha1:a1940c63feb8eddfd8026de53384baf5056d5dcc + pristine_git_object: 673edf82a97d0fea7295625d3e092ea369a36b79 src/mistralai/gcp/client/utils/enums.py: id: 2341407d5443 last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 src/mistralai/gcp/client/utils/eventstreaming.py: id: bb66f0c3e0dc - last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b - pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + last_write_checksum: sha1:ffa870a25a7e4e2015bfd7a467ccd3aa1de97f0e + pristine_git_object: f2052fc22d9fd6c663ba3dce019fe234ca37108b src/mistralai/gcp/client/utils/forms.py: id: ebf34781d6bd - last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 - pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + last_write_checksum: sha1:0ca31459b99f761fcc6d0557a0a38daac4ad50f4 + pristine_git_object: 1e550bd5c2c35d977ddc10f49d77c23cb12c158d src/mistralai/gcp/client/utils/headers.py: id: 4c369582903e last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 @@ -586,20 +594,24 @@ trackedFiles: pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c src/mistralai/gcp/client/utils/retries.py: id: 542ebd75b79b - last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 - pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + last_write_checksum: sha1:471372f5c5d1dd5583239c9cf3c75f1b636e5d87 + pristine_git_object: af07d4e941007af4213c5ec9047ef8a2fca04e5e src/mistralai/gcp/client/utils/security.py: id: 5273152365f4 - last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 - pristine_git_object: 
295a3f40031dbb40073ad227fd4a355660f97ab2 + last_write_checksum: sha1:435dd8b180cefcd733e635b9fa45512da091d9c0 + pristine_git_object: 17996bd54b8624009802fbbdf30bcb4225b8dfed src/mistralai/gcp/client/utils/serializers.py: id: a7836e553d41 last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/gcp/client/utils/unions.py: + id: 8abba1cf1b6d + last_write_checksum: sha1:6e38049f323e0b5fb4bd0e88ab51ec447197ccb0 + pristine_git_object: a227f4e87be22fce682fcae5813b71835199ec5e src/mistralai/gcp/client/utils/unmarshal_json_response.py: id: d972d22cf934 - last_write_checksum: sha1:a68b9e491188e6c1956a749530eac3c7dc8004e7 - pristine_git_object: 83e8275e59adf51fb01a0579ae26627ee29fee49 + last_write_checksum: sha1:5c75fb4ee04ae80a350ceb96abf4e1fdb255ee6c + pristine_git_object: ead3e5a00171b3a97af5112b6cd9ece698ce74f5 src/mistralai/gcp/client/utils/url.py: id: 0d311bbcb8f8 last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 diff --git a/packages/gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml index 93cc5a42..54336636 100644 --- a/packages/gcp/.speakeasy/gen.yaml +++ b/packages/gcp/.speakeasy/gen.yaml @@ -13,8 +13,9 @@ generation: requestResponseComponentNamesFeb2024: true securityFeb2025: true sharedErrorComponentsApr2025: true - methodSignaturesApr2024: true sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -22,31 +23,37 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" + versioningStrategy: automatic persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a4 + version: 2.0.0-a4.1 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + main: {} allowedRedefinedBuiltins: - id - object + - input + - dir asyncMode: both authors: - Mistral baseErrorName: MistralGCPError clientServerStatusCodesAsErrors: true - constFieldCasing: upper + constFieldCasing: normal defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. 
enableCustomCodeRegions: false enumFormat: union fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true responseRequiredSep2024: true flatAdditionalProperties: true flattenGlobalSecurity: true @@ -58,17 +65,17 @@ python: option: openapi paths: callbacks: "" - errors: "" + errors: errors operations: "" shared: "" webhooks: "" inferUnionDiscriminators: true inputModelSuffix: input license: "" - maxMethodParams: 15 + maxMethodParams: 999 methodArguments: infer-optional-args moduleName: mistralai.gcp.client - multipartArrayFormat: legacy + multipartArrayFormat: standard outputModelSuffix: output packageManager: uv packageName: mistralai-gcp @@ -78,3 +85,4 @@ python: responseFormat: flat sseFlatResponse: false templateVersion: v2 + useAsyncHooks: false diff --git a/packages/gcp/docs/models/httpvalidationerror.md b/packages/gcp/docs/errors/httpvalidationerror.md similarity index 100% rename from packages/gcp/docs/models/httpvalidationerror.md rename to packages/gcp/docs/errors/httpvalidationerror.md diff --git a/packages/gcp/docs/models/chatcompletionrequest.md b/packages/gcp/docs/models/chatcompletionrequest.md index 61a25d86..8dbd4a82 100644 --- a/packages/gcp/docs/models/chatcompletionrequest.md +++ b/packages/gcp/docs/models/chatcompletionrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{<br/>"role": "user",<br/>"content": "Who is the best French painter? Answer in one short sentence."<br/>}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/packages/gcp/docs/models/chatcompletionstreamrequest.md b/packages/gcp/docs/models/chatcompletionstreamrequest.md index 3e790e7d..db76b6c8 100644 --- a/packages/gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/gcp/docs/models/chatcompletionstreamrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{<br/>"role": "user",<br/>"content": "Who is the best French painter? Answer in one short sentence."<br/>}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/packages/gcp/pylintrc b/packages/gcp/pylintrc index a8fcb932..0391ac11 100644 --- a/packages/gcp/pylintrc +++ b/packages/gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.10 # Discover python modules and packages in the file system subtree. recursive=no @@ -459,7 +459,8 @@ disable=raw-checker-failed, consider-using-with, wildcard-import, unused-wildcard-import, - too-many-return-statements + too-many-return-statements, + redefined-builtin # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -641,7 +642,7 @@ additional-builtins= allow-global-unused-variables=yes # List of names allowed to shadow builtins -allowed-redefined-builtins=id,object +allowed-redefined-builtins=id,object,input,dir # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. 
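A minimal sketch of the `json_schema` response format documented in the chat completion tables above, built only from the "book" example those tables give; the client class and model name are deliberately omitted because they are not shown in this hunk, so only the payload shape is illustrated:

    # Sketch of the documented json_schema response format.
    # Field names follow the "book" example in the docs tables above;
    # nothing beyond that example is assumed.
    response_format = {
        "type": "json_schema",
        "json_schema": {
            "name": "book",
            "strict": True,
            "schema": {
                "title": "Book",
                "type": "object",
                "properties": {
                    "name": {"title": "Name", "type": "string"},
                    "authors": {
                        "items": {"type": "string"},
                        "title": "Authors",
                        "type": "array",
                    },
                },
                "required": ["name", "authors"],
                "additionalProperties": False,
            },
        },
    }
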
diff --git a/packages/gcp/src/mistralai/gcp/client/__init__.py b/packages/gcp/src/mistralai/gcp/client/__init__.py index dd02e42e..833c68cd 100644 --- a/packages/gcp/src/mistralai/gcp/client/__init__.py +++ b/packages/gcp/src/mistralai/gcp/client/__init__.py @@ -9,7 +9,6 @@ ) from .sdk import * from .sdkconfiguration import * -from .models import * VERSION: str = __version__ diff --git a/packages/gcp/src/mistralai/gcp/client/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py index ba48dac1..204c92a6 100644 --- a/packages/gcp/src/mistralai/gcp/client/_version.py +++ b/packages/gcp/src/mistralai/gcp/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "2.0.0a4" +__version__: str = "2.0.0-a4.1" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a4 2.794.1 1.0.0 mistralai-gcp" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0-a4.1 2.841.0 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/gcp/src/mistralai/gcp/client/basesdk.py b/packages/gcp/src/mistralai/gcp/client/basesdk.py index 6f9f5fd9..b3edcb0a 100644 --- a/packages/gcp/src/mistralai/gcp/client/basesdk.py +++ b/packages/gcp/src/mistralai/gcp/client/basesdk.py @@ -2,7 +2,7 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai.gcp.client import models, utils +from mistralai.gcp.client import errors, utils from mistralai.gcp.client._hooks import ( AfterErrorContext, AfterSuccessContext, @@ -12,6 +12,7 @@ RetryConfig, SerializedRequestBody, get_body_content, + run_sync_in_thread, ) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse @@ -264,7 +265,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -285,7 +286,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -315,7 +316,10 @@ async def do_request_async( async def do(): http_res = None try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -329,14 +333,17 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -347,9 +354,10 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, 
None ) + if err is not None: logger.debug("Request Exception", exc_info=True) raise err @@ -357,7 +365,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -369,6 +377,8 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) return http_res diff --git a/packages/gcp/src/mistralai/gcp/client/chat.py b/packages/gcp/src/mistralai/gcp/client/chat.py index 78541248..925d69ed 100644 --- a/packages/gcp/src/mistralai/gcp/client/chat.py +++ b/packages/gcp/src/mistralai/gcp/client/chat.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai.gcp.client import models, utils +from mistralai.gcp.client import errors, models, utils from mistralai.gcp.client._hooks import HookContext from mistralai.gcp.client.types import OptionalNullable, UNSET from mistralai.gcp.client.utils import eventstreaming @@ -176,18 +176,18 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -353,18 +353,18 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -521,17 +521,17 @@ def complete( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -688,14 +688,14 @@ async def complete_async( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/gcp/src/mistralai/gcp/client/errors/__init__.py b/packages/gcp/src/mistralai/gcp/client/errors/__init__.py new file mode 100644 index 00000000..00c8ee00 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/__init__.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .mistralgcperror import MistralGCPError +from typing import Any, TYPE_CHECKING + +from mistralai.gcp.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralGCPError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py b/packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py similarity index 77% rename from packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py rename to packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py index 57df7260..59806819 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py @@ -1,16 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai.gcp.client.models import MistralGCPError +from mistralai.gcp.client.errors import MistralGCPError +from mistralai.gcp.client.models import validationerror as models_validationerror from mistralai.gcp.client.types import BaseModel from typing import List, Optional class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None + detail: Optional[List[models_validationerror.ValidationError]] = None @dataclass(unsafe_hash=True) diff --git a/packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py b/packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py similarity index 100% rename from packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py rename to packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py diff --git a/packages/gcp/src/mistralai/gcp/client/models/no_response_error.py b/packages/gcp/src/mistralai/gcp/client/errors/no_response_error.py similarity index 100% rename from packages/gcp/src/mistralai/gcp/client/models/no_response_error.py rename to packages/gcp/src/mistralai/gcp/client/errors/no_response_error.py diff --git a/packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py b/packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py similarity index 92% rename from packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py rename to packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py index 0e86ea6c..e8bd83c1 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses 
import dataclass -from mistralai.gcp.client.models import MistralGCPError +from mistralai.gcp.client.errors import MistralGCPError @dataclass(unsafe_hash=True) diff --git a/packages/gcp/src/mistralai/gcp/client/models/sdkerror.py b/packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py similarity index 95% rename from packages/gcp/src/mistralai/gcp/client/models/sdkerror.py rename to packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py index 00bc1d99..69809246 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/sdkerror.py +++ b/packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai.gcp.client.models import MistralGCPError +from mistralai.gcp.client.errors import MistralGCPError MAX_MESSAGE_LEN = 10_000 diff --git a/packages/gcp/src/mistralai/gcp/client/fim.py b/packages/gcp/src/mistralai/gcp/client/fim.py index e2acacd5..4202102a 100644 --- a/packages/gcp/src/mistralai/gcp/client/fim.py +++ b/packages/gcp/src/mistralai/gcp/client/fim.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai.gcp.client import models, utils +from mistralai.gcp.client import errors, models, utils from mistralai.gcp.client._hooks import HookContext from mistralai.gcp.client.types import OptionalNullable, UNSET from mistralai.gcp.client.utils import eventstreaming @@ -133,18 +133,18 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -267,18 +267,18 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, 
http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -394,17 +394,17 @@ def complete( return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -520,14 +520,14 @@ async def complete_async( return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/gcp/src/mistralai/gcp/client/models/__init__.py b/packages/gcp/src/mistralai/gcp/client/models/__init__.py index fb446c25..575f6404 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/__init__.py +++ b/packages/gcp/src/mistralai/gcp/client/models/__init__.py @@ -1,10 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .mistralgcperror import MistralGCPError -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING + +from mistralai.gcp.client.utils.dynamic_imports import lazy_getattr, lazy_dir if TYPE_CHECKING: from .assistantmessage import ( @@ -49,7 +47,7 @@ CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, ) - from .contentchunk import ContentChunk, ContentChunkTypedDict + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk from .deltamessage import ( DeltaMessage, DeltaMessageContent, @@ -80,7 +78,6 @@ FunctionCallTypedDict, ) from .functionname import FunctionName, FunctionNameTypedDict - from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData from .imagedetail import ImageDetail from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( @@ -91,13 +88,10 @@ ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode - from .no_response_error import NoResponseError from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats - from .responsevalidationerror import ResponseValidationError - from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( SystemMessage, @@ -193,8 +187,6 @@ "FunctionName", "FunctionNameTypedDict", "FunctionTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", "ImageDetail", "ImageURL", "ImageURLChunk", @@ -206,9 +198,7 @@ "JSONSchemaTypedDict", "Loc", "LocTypedDict", - "MistralGCPError", "MistralPromptMode", - "NoResponseError", "Prediction", "PredictionTypedDict", "ReferenceChunk", @@ -216,8 +206,6 @@ "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", - "ResponseValidationError", - "SDKError", "Security", "SecurityTypedDict", "SystemMessage", @@ -244,6 +232,7 @@ "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", + "UnknownContentChunk", "UsageInfo", "UsageInfoTypedDict", "UserMessage", @@ -289,6 +278,7 @@ "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", "DeltaMessage": ".deltamessage", "DeltaMessageContent": ".deltamessage", "DeltaMessageContentTypedDict": ".deltamessage", @@ -311,8 +301,6 @@ "FunctionCallTypedDict": ".functioncall", "FunctionName": ".functionname", "FunctionNameTypedDict": ".functionname", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", "ImageDetail": ".imagedetail", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", @@ -323,7 +311,6 @@ "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", - "NoResponseError": ".no_response_error", "Prediction": ".prediction", "PredictionTypedDict": ".prediction", "ReferenceChunk": ".referencechunk", @@ -331,8 +318,6 @@ "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", "ResponseFormats": ".responseformats", - "ResponseValidationError": ".responsevalidationerror", - "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", "SystemMessage": ".systemmessage", @@ -372,39 +357,11 @@ } -def 
dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py index 7061775b..702ac470 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py @@ -37,7 +37,7 @@ class AssistantMessageTypedDict(TypedDict): class AssistantMessage(BaseModel): - ROLE: Annotated[ + role: Annotated[ Annotated[ Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) ], @@ -53,30 +53,31 @@ class AssistantMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls", "prefix"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py index 1bc03922..8229c5bb 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py @@ -165,55 +165,54 @@ class ChatCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - 
"stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py index 0a5a0021..3c228d2e 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py @@ -163,55 +163,54 @@ class ChatCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + 
and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py index 9e54cb6d..a0b1ae2f 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py @@ -6,7 +6,8 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import List, Optional from typing_extensions import NotRequired, TypedDict @@ -32,3 +33,19 @@ class CompletionChunk(BaseModel): created: Optional[int] = None usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py index 6f306721..e58d4c88 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py @@ -39,30 +39,14 @@ class CompletionResponseStreamChoice(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py index 1cd9e502..18d48150 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py @@ -4,9 +4,12 @@ from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.gcp.client.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union +from functools import partial +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType @@ -16,11 +19,32 @@ ) +class 
UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, +} + + ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ImageURLChunk, TextChunk, ReferenceChunk, UnknownContentChunk], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), ] diff --git a/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py index 96923518..63e6a7f3 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py @@ -40,30 +40,25 @@ class DeltaMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py index f37bbcc3..e460f76c 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py @@ -84,47 +84,46 @@ class FIMCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, 
None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py index 8e610261..fffc3054 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py @@ -82,47 +82,46 @@ class FIMCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/function.py b/packages/gcp/src/mistralai/gcp/client/models/function.py index 28577eff..439e8313 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/function.py +++ b/packages/gcp/src/mistralai/gcp/client/models/function.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict @@ -21,3 +22,19 @@ class Function(BaseModel): description: Optional[str] = None strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurl.py b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py index d4f298f1..903d0a1a 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/imageurl.py +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py @@ -25,30 +25,25 @@ class ImageURL(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py index fc5284c1..4bec0eec 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py @@ -30,7 +30,13 @@ class ImageURLChunk(BaseModel): image_url: ImageURLUnion - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["image_url"], AfterValidator(validate_const("image_url"))], pydantic.Field(alias="type"), ] = "image_url" + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py index 443c429d..684ac09f 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py +++ b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py @@ -32,30 +32,31 @@ class JSONSchema(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + 
is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/prediction.py b/packages/gcp/src/mistralai/gcp/client/models/prediction.py index f53579ed..2e325289 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/prediction.py +++ b/packages/gcp/src/mistralai/gcp/client/models/prediction.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL from mistralai.gcp.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -19,7 +20,7 @@ class PredictionTypedDict(TypedDict): class Prediction(BaseModel): r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) ], @@ -27,3 +28,25 @@ class Prediction(BaseModel): ] = "content" content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py index 274ea7f7..261c4755 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL from mistralai.gcp.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional from typing_extensions import Annotated, TypedDict @@ -17,9 +18,31 @@ class ReferenceChunkTypedDict(TypedDict): class ReferenceChunk(BaseModel): reference_ids: List[int] - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["reference"]], AfterValidator(validate_const("reference")) ], pydantic.Field(alias="type"), ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/responseformat.py b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py index 34ae6b03..f3aa9930 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/responseformat.py +++ b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py @@ -31,30 +31,25 @@ class ResponseFormat(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py index a7d695a7..b3795c4b 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py @@ -32,7 +32,13 @@ class SystemMessageTypedDict(TypedDict): class SystemMessage(BaseModel): content: SystemMessageContent - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["system"], AfterValidator(validate_const("system"))], pydantic.Field(alias="role"), ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py index 225f38b7..8de71c90 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py +++ 
b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py @@ -15,5 +15,5 @@ SystemMessageContentChunks = Annotated[ - Union[TextChunk, ThinkChunk], Field(discriminator="TYPE") + Union[TextChunk, ThinkChunk], Field(discriminator="type") ] diff --git a/packages/gcp/src/mistralai/gcp/client/models/textchunk.py b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py index 77576c9f..69032272 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/textchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py @@ -17,7 +17,13 @@ class TextChunkTypedDict(TypedDict): class TextChunk(BaseModel): text: str - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["text"], AfterValidator(validate_const("text"))], pydantic.Field(alias="type"), ] = "text" + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py index b65fffb2..33ec8394 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py @@ -3,9 +3,10 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL from mistralai.gcp.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -29,10 +30,32 @@ class ThinkChunkTypedDict(TypedDict): class ThinkChunk(BaseModel): thinking: List[Thinking] - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], pydantic.Field(alias="type"), ] = "thinking" closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/tool.py b/packages/gcp/src/mistralai/gcp/client/models/tool.py index d09c6854..670aa81f 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/tool.py +++ b/packages/gcp/src/mistralai/gcp/client/models/tool.py @@ -3,7 +3,8 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -17,3 +18,19 @@ class Tool(BaseModel): function: Function type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolcall.py b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py index a1edf337..3ea8e283 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/toolcall.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py @@ -3,7 +3,8 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -23,3 +24,19 @@ class ToolCall(BaseModel): type: Optional[ToolTypes] = None index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py index de3828da..6e795fd7 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py @@ -3,7 +3,8 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -23,3 +24,19 @@ class ToolChoice(BaseModel): r"""this restriction of `Function` is used to select a specific function to call""" type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def 
serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py index 65b1d9d6..ce160391 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py @@ -35,7 +35,7 @@ class ToolMessageTypedDict(TypedDict): class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], pydantic.Field(alias="role"), ] = "tool" @@ -46,30 +46,31 @@ class ToolMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py index 9b7207b1..cb6feb6e 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py +++ b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py @@ -45,37 +45,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "prompt_audio_seconds", - ] - nullable_fields = ["prompt_audio_seconds"] - null_default_fields = [] - + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: 
disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v diff --git a/packages/gcp/src/mistralai/gcp/client/models/usermessage.py b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py index c083e16d..e237e900 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/usermessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py @@ -27,37 +27,27 @@ class UserMessageTypedDict(TypedDict): class UserMessage(BaseModel): content: Nullable[UserMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["user"], AfterValidator(validate_const("user"))], pydantic.Field(alias="role"), ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["content"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/utils/__init__.py b/packages/gcp/src/mistralai/gcp/client/utils/__init__.py index 05f26ade..b488c2df 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/__init__.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/__init__.py @@ -1,14 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING, Callable, TypeVar +import asyncio + +from .dynamic_imports import lazy_getattr, lazy_dir + +_T = TypeVar("_T") + + +async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T: + """Run a synchronous function in a thread pool to avoid blocking the event loop.""" + return await asyncio.to_thread(func, *args) + if TYPE_CHECKING: from .annotations import get_discriminator from .datetimes import parse_datetime from .enums import OpenEnumMeta + from .unions import parse_open_union from .headers import get_headers, get_response_headers from .metadata import ( FieldMetadata, @@ -76,6 +85,7 @@ "match_response", "MultipartFormMetadata", "OpenEnumMeta", + "parse_open_union", "PathParamMetadata", "QueryParamMetadata", "remove_suffix", @@ -128,6 +138,7 @@ "match_response": ".values", "MultipartFormMetadata": ".metadata", "OpenEnumMeta": ".enums", + "parse_open_union": ".unions", "PathParamMetadata": ".metadata", "QueryParamMetadata": ".metadata", "remove_suffix": ".url", @@ -157,38 +168,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - return getattr(module, attr_name) - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py b/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py new file mode 100644 index 00000000..673edf82 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). 
+ dynamic_imports: Dict mapping attribute names to relative module paths. + sub_packages: Optional list of subpackage names to lazy-load. + """ + module_name = dynamic_imports.get(attr_name) + if module_name is not None: + try: + module = dynamic_import(package, module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + if sub_packages and attr_name in sub_packages: + return import_module(f".{attr_name}", package) + + raise AttributeError(f"module '{package}' has no attribute '{attr_name}'") + + +def lazy_dir(*, dynamic_imports, sub_packages=None): + """Module-level __dir__ that lists lazily-loadable attributes.""" + lazy_attrs = builtins.list(dynamic_imports.keys()) + if sub_packages: + lazy_attrs.extend(sub_packages) + return builtins.sorted(lazy_attrs) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py b/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py index 0969899b..f2052fc2 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py @@ -2,7 +2,9 @@ import re import json +from dataclasses import dataclass, asdict from typing import ( + Any, Callable, Generic, TypeVar, @@ -22,6 +24,7 @@ class EventStream(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] + _closed: bool def __init__( self, @@ -33,17 +36,21 @@ def __init__( self.response = response self.generator = stream_events(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __iter__(self): return self def __next__(self): + if self._closed: + raise StopIteration return next(self.generator) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): + self._closed = True self.response.close() @@ -53,6 +60,7 @@ class EventStreamAsync(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] + _closed: bool def __init__( self, @@ -64,33 +72,45 @@ def __init__( self.response = response self.generator = stream_events_async(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __aiter__(self): return self async def __anext__(self): + if self._closed: + raise StopAsyncIteration return await self.generator.__anext__() async def __aenter__(self): return self async def __aexit__(self, exc_type, exc_val, exc_tb): + self._closed = True await self.response.aclose() +@dataclass class ServerEvent: id: Optional[str] = None event: Optional[str] = None - data: Optional[str] = None + data: Any = None retry: Optional[int] = None MESSAGE_BOUNDARIES = [ b"\r\n\r\n", - b"\n\n", + b"\r\n\r", + b"\r\n\n", + b"\r\r\n", + b"\n\r\n", b"\r\r", + b"\n\r", + b"\n\n", ] +UTF8_BOM = b"\xef\xbb\xbf" + async def stream_events_async( response: httpx.Response, @@ -99,14 +119,10 @@ async def stream_events_async( ) -> AsyncGenerator[T, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -121,15 +137,22 @@ async def stream_events_async( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + await response.aclose() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event @@ -141,14 +164,10 @@ def stream_events( ) -> Generator[T, None, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. - if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -163,22 +182,33 @@ def stream_events( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + response.close() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: block = raw.decode() lines = re.split(r"\r?\n|\r", block) publish = False @@ -189,13 +219,16 @@ def _parse_event( continue delim = line.find(":") - if delim <= 0: + if delim == 0: continue - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] if field == "event": event.event = value @@ -204,37 +237,36 @@ def _parse_event( data += value + "\n" publish = True elif field == "id": - event.id = value publish = True + if "\x00" not in value: + event_id = value elif field == "retry": - event.retry = int(value) if value.isdigit() else None + if value.isdigit(): + event.retry = int(value) publish = True + event.id = event_id + if sentinel and data == f"{sentinel}\n": - return None, True + return None, True, event_id if data: data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data 
= json.loads(data) - except Exception: - pass + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data out = None if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): diff --git a/packages/gcp/src/mistralai/gcp/client/utils/forms.py b/packages/gcp/src/mistralai/gcp/client/utils/forms.py index f961e76b..1e550bd5 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/forms.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/forms.py @@ -142,7 +142,7 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files - array_field_name = f_name + "[]" + array_field_name = f_name for file_obj in val: if not _is_set(file_obj): continue @@ -185,7 +185,7 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - array_field_name = f_name + "[]" + array_field_name = f_name form[array_field_name] = values else: form[f_name] = _val_to_string(val) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/retries.py b/packages/gcp/src/mistralai/gcp/client/utils/retries.py index 88a91b10..af07d4e9 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/retries.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/retries.py @@ -144,12 +144,7 @@ def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise @@ -193,12 +188,7 @@ async def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise diff --git a/packages/gcp/src/mistralai/gcp/client/utils/security.py b/packages/gcp/src/mistralai/gcp/client/utils/security.py index 295a3f40..17996bd5 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/security.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/security.py @@ -135,6 +135,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value elif sub_type == "custom": return else: diff --git a/packages/gcp/src/mistralai/gcp/client/utils/unions.py b/packages/gcp/src/mistralai/gcp/client/utils/unions.py new file mode 100644 index 00000000..a227f4e8 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/unions.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. 
+ + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. + """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py index 83e8275e..ead3e5a0 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py @@ -5,7 +5,7 @@ import httpx from .serializers import unmarshal_json -from mistralai.gcp.client import models +from mistralai.gcp.client import errors T = TypeVar("T") @@ -30,7 +30,7 @@ def unmarshal_json_response( try: return unmarshal_json(body, typ) except Exception as e: - raise models.ResponseValidationError( + raise errors.ResponseValidationError( "Response validation failed", http_res, e, diff --git a/pylintrc b/pylintrc index 2dc62b0e..d1653ae1 100644 --- a/pylintrc +++ b/pylintrc @@ -641,7 +641,7 @@ additional-builtins= allow-global-unused-variables=yes # List of names allowed to shadow builtins -allowed-redefined-builtins=id,object +allowed-redefined-builtins=id,object,input,dir # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. 
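The new `parse_open_union` helper introduced in the hunk above dispatches a payload on its discriminator key and, for discriminator values the SDK does not yet know, wraps the raw payload in a fallback class instead of failing validation; that is what makes the generated open unions forward-compatible. Below is a minimal usage sketch of that behavior. The `TextChunk`, `ImageChunk`, and `UnknownChunk` classes are illustrative assumptions (not part of the generated models), and the `mistralai.gcp.client.utils.unions` import path is inferred from the new file's location in this patch.

```python
# Hedged sketch: exercising parse_open_union with hypothetical variant types.
from typing import Any, Dict

from pydantic import BaseModel

# Path inferred from packages/gcp/src/mistralai/gcp/client/utils/unions.py above.
from mistralai.gcp.client.utils.unions import parse_open_union


class TextChunk(BaseModel):        # hypothetical known variant
    type: str
    text: str


class ImageChunk(BaseModel):       # hypothetical known variant
    type: str
    url: str


class UnknownChunk(BaseModel):     # hypothetical fallback; keeps the raw payload
    raw: Dict[str, Any]


def parse_chunk(payload: Dict[str, Any]) -> BaseModel:
    # Known "type" values validate into their variant model;
    # anything else becomes UnknownChunk(raw=payload).
    return parse_open_union(
        payload,
        disc_key="type",
        variants={"text": TextChunk, "image": ImageChunk},
        unknown_cls=UnknownChunk,
        union_name="ContentChunk",
    )


print(parse_chunk({"type": "text", "text": "hello"}))       # -> TextChunk
print(parse_chunk({"type": "video", "id": "abc123"}))       # -> UnknownChunk, payload preserved
```

Because the fallback preserves the untouched dict, callers can log or inspect variants added by newer API versions without a hard failure, which matches the "forward-compatibility" note in the helper's docstring.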
diff --git a/src/mistralai/client/__init__.py b/src/mistralai/client/__init__.py index 481fc916..4b79610a 100644 --- a/src/mistralai/client/__init__.py +++ b/src/mistralai/client/__init__.py @@ -10,7 +10,6 @@ ) from .sdk import * from .sdkconfiguration import * -from .models import * VERSION: str = __version__ diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 814d9ec7..1a4d15d6 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -4,10 +4,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0a3" +__version__: str = "2.0.0-a3.1" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a3 2.794.1 1.0.0 mistralai" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0-a3.1 2.841.0 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/accesses.py b/src/mistralai/client/accesses.py index cda484c8..0761b0bc 100644 --- a/src/mistralai/client/accesses.py +++ b/src/mistralai/client/accesses.py @@ -2,12 +2,8 @@ # @generated-id: 76fc53bfcf59 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - entitytype as models_entitytype, - shareenum as models_shareenum, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -46,7 +42,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListLibraryAccessesRequest( + request = models.LibrariesShareListV1Request( library_id=library_id, ) @@ -79,7 +75,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListLibraryAccesses", + operation_id="libraries_share_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -95,17 +91,17 @@ def list( return unmarshal_json_response(models.ListSharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -136,7 +132,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListLibraryAccessesRequest( + request = models.LibrariesShareListV1Request( library_id=library_id, ) @@ -169,7 +165,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - 
operation_id="ListLibraryAccesses", + operation_id="libraries_share_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -185,25 +181,25 @@ async def list_async( return unmarshal_json_response(models.ListSharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update_or_create( self, *, library_id: str, - level: models_shareenum.ShareEnum, + level: models.ShareEnum, share_with_uuid: str, - share_with_type: models_entitytype.EntityType, + share_with_type: models.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -234,7 +230,7 @@ def update_or_create( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateOrCreateLibraryAccessRequest( + request = models.LibrariesShareCreateV1Request( library_id=library_id, sharing_in=models.SharingIn( org_id=org_id, @@ -276,7 +272,7 @@ def update_or_create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateOrCreateLibraryAccess", + operation_id="libraries_share_create_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -292,25 +288,25 @@ def update_or_create( return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_or_create_async( self, *, library_id: str, - level: models_shareenum.ShareEnum, + level: models.ShareEnum, share_with_uuid: str, - share_with_type: models_entitytype.EntityType, + share_with_type: models.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ 
-341,7 +337,7 @@ async def update_or_create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateOrCreateLibraryAccessRequest( + request = models.LibrariesShareCreateV1Request( library_id=library_id, sharing_in=models.SharingIn( org_id=org_id, @@ -383,7 +379,7 @@ async def update_or_create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateOrCreateLibraryAccess", + operation_id="libraries_share_create_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -399,24 +395,24 @@ async def update_or_create_async( return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, *, library_id: str, share_with_uuid: str, - share_with_type: models_entitytype.EntityType, + share_with_type: models.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -446,7 +442,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteLibraryAccessRequest( + request = models.LibrariesShareDeleteV1Request( library_id=library_id, sharing_delete=models.SharingDelete( org_id=org_id, @@ -487,7 +483,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteLibraryAccess", + operation_id="libraries_share_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -503,24 +499,24 @@ def delete( return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, *, library_id: 
str, share_with_uuid: str, - share_with_type: models_entitytype.EntityType, + share_with_type: models.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -550,7 +546,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteLibraryAccessRequest( + request = models.LibrariesShareDeleteV1Request( library_id=library_id, sharing_delete=models.SharingDelete( org_id=org_id, @@ -591,7 +587,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteLibraryAccess", + operation_id="libraries_share_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -607,14 +603,14 @@ async def delete_async( return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py index 0942cb20..2b70d152 100644 --- a/src/mistralai/client/agents.py +++ b/src/mistralai/client/agents.py @@ -2,16 +2,8 @@ # @generated-id: e946546e3eaa from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - agentscompletionrequest as models_agentscompletionrequest, - agentscompletionstreamrequest as models_agentscompletionstreamrequest, - mistralpromptmode as models_mistralpromptmode, - prediction as models_prediction, - responseformat as models_responseformat, - tool as models_tool, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -25,47 +17,40 @@ def complete( self, *, messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessage], - List[ - models_agentscompletionrequest.AgentsCompletionRequestMessageTypedDict - ], + List[models.AgentsCompletionRequestMessage], + List[models.AgentsCompletionRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ - models_agentscompletionrequest.AgentsCompletionRequestStop, - models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: 
OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_agentscompletionrequest.AgentsCompletionRequestToolChoice, - models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -179,63 +164,56 @@ def complete( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, *, messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessage], - List[ - models_agentscompletionrequest.AgentsCompletionRequestMessageTypedDict - ], + List[models.AgentsCompletionRequestMessage], + List[models.AgentsCompletionRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ - models_agentscompletionrequest.AgentsCompletionRequestStop, - models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_agentscompletionrequest.AgentsCompletionRequestToolChoice, - 
models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -349,65 +327,56 @@ async def complete_async( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, *, messages: Union[ - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessage - ], - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessageTypedDict - ], + List[models.AgentsCompletionStreamRequestMessage], + List[models.AgentsCompletionStreamRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, 
models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -530,66 +499,57 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, *, messages: Union[ - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessage - ], - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessageTypedDict - ], + List[models.AgentsCompletionStreamRequestMessage], + List[models.AgentsCompletionStreamRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, 
retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -712,15 +672,15 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/basesdk.py b/src/mistralai/client/basesdk.py index 611b4059..a976121b 100644 --- a/src/mistralai/client/basesdk.py +++ b/src/mistralai/client/basesdk.py @@ -3,13 +3,18 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import ( AfterErrorContext, AfterSuccessContext, BeforeRequestContext, ) -from mistralai.client.utils import RetryConfig, SerializedRequestBody, get_body_content +from mistralai.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, + run_sync_in_thread, +) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse @@ -261,7 +266,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -282,7 +287,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -312,7 +317,10 @@ async def do_request_async( async def do(): http_res = None try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -326,14 +334,17 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: 
%s\nBody: %s", @@ -344,9 +355,10 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None ) + if err is not None: logger.debug("Request Exception", exc_info=True) raise err @@ -354,7 +366,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -366,6 +378,8 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) return http_res diff --git a/src/mistralai/client/batch_jobs.py b/src/mistralai/client/batch_jobs.py index 752c7652..0e135b30 100644 --- a/src/mistralai/client/batch_jobs.py +++ b/src/mistralai/client/batch_jobs.py @@ -3,14 +3,8 @@ from .basesdk import BaseSDK from datetime import datetime -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - apiendpoint as models_apiendpoint, - batchjobstatus as models_batchjobstatus, - batchrequest as models_batchrequest, - listbatchjobsop as models_listbatchjobsop, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -28,13 +22,13 @@ def list( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, - order_by: Optional[models_listbatchjobsop.OrderBy] = "-created", + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + order_by: Optional[models.OrderBy] = "-created", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: + ) -> models.ListBatchJobsResponse: r"""Get Batch Jobs Get a list of batch jobs for your organization and user. 
@@ -63,7 +57,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListBatchJobsRequest( + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, page_size=page_size, model=model, @@ -104,7 +98,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListBatchJobs", + operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -116,15 +110,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobsOut, http_res) + return unmarshal_json_response(models.ListBatchJobsResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -136,13 +130,13 @@ async def list_async( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, - order_by: Optional[models_listbatchjobsop.OrderBy] = "-created", + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + order_by: Optional[models.OrderBy] = "-created", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: + ) -> models.ListBatchJobsResponse: r"""Get Batch Jobs Get a list of batch jobs for your organization and user. 
@@ -171,7 +165,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListBatchJobsRequest( + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, page_size=page_size, model=model, @@ -212,7 +206,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListBatchJobs", + operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -224,26 +218,23 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobsOut, http_res) + return unmarshal_json_response(models.ListBatchJobsResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def create( self, *, - endpoint: models_apiendpoint.APIEndpoint, + endpoint: models.APIEndpoint, input_files: OptionalNullable[List[str]] = UNSET, requests: OptionalNullable[ - Union[ - List[models_batchrequest.BatchRequest], - List[models_batchrequest.BatchRequestTypedDict], - ] + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] ] = UNSET, model: OptionalNullable[str] = UNSET, agent_id: OptionalNullable[str] = UNSET, @@ -253,7 +244,7 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Create Batch Job Create a new batch job, it will be queued for processing. 
@@ -280,7 +271,7 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.BatchJobIn( + request = models.CreateBatchJobRequest( input_files=input_files, requests=utils.get_pydantic_model( requests, OptionalNullable[List[models.BatchRequest]] @@ -306,7 +297,7 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn + request, False, False, "json", models.CreateBatchJobRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -324,7 +315,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateBatchJob", + operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -336,26 +327,23 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, *, - endpoint: models_apiendpoint.APIEndpoint, + endpoint: models.APIEndpoint, input_files: OptionalNullable[List[str]] = UNSET, requests: OptionalNullable[ - Union[ - List[models_batchrequest.BatchRequest], - List[models_batchrequest.BatchRequestTypedDict], - ] + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] ] = UNSET, model: OptionalNullable[str] = UNSET, agent_id: OptionalNullable[str] = UNSET, @@ -365,7 +353,7 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Create Batch Job Create a new batch job, it will be queued for processing. 
@@ -392,7 +380,7 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.BatchJobIn( + request = models.CreateBatchJobRequest( input_files=input_files, requests=utils.get_pydantic_model( requests, OptionalNullable[List[models.BatchRequest]] @@ -418,7 +406,7 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn + request, False, False, "json", models.CreateBatchJobRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -436,7 +424,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateBatchJob", + operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -448,15 +436,15 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -467,7 +455,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Get Batch Job Get a batch job details by its UUID. 
@@ -492,7 +480,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetBatchJobRequest( + request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, inline=inline, ) @@ -526,7 +514,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetBatchJob", + operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -538,15 +526,15 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -557,7 +545,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Get Batch Job Get a batch job details by its UUID. @@ -582,7 +570,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetBatchJobRequest( + request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, inline=inline, ) @@ -616,7 +604,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetBatchJob", + operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -628,15 +616,15 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def cancel( self, @@ -646,7 +634,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Cancel Batch Job Request the cancellation of a batch job. 
@@ -667,7 +655,7 @@ def cancel( else: base_url = self._get_url(base_url, url_variables) - request = models.CancelBatchJobRequest( + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, ) @@ -700,7 +688,7 @@ def cancel( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CancelBatchJob", + operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -712,15 +700,15 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def cancel_async( self, @@ -730,7 +718,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Cancel Batch Job Request the cancellation of a batch job. @@ -751,7 +739,7 @@ async def cancel_async( else: base_url = self._get_url(base_url, url_variables) - request = models.CancelBatchJobRequest( + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, ) @@ -784,7 +772,7 @@ async def cancel_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CancelBatchJob", + operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -796,12 +784,12 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/beta_agents.py b/src/mistralai/client/beta_agents.py index 4e692f17..157c5de4 100644 --- a/src/mistralai/client/beta_agents.py +++ b/src/mistralai/client/beta_agents.py @@ -2,15 +2,8 @@ # @generated-id: b64ad29b7174 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - agentcreationrequest as 
models_agentcreationrequest, - agentupdaterequest as models_agentupdaterequest, - completionargs as models_completionargs, - getagentop as models_getagentop, - requestsource as models_requestsource, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -28,15 +21,12 @@ def create( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentcreationrequest.AgentCreationRequestTool], - List[models_agentcreationrequest.AgentCreationRequestToolTypedDict], + List[models.CreateAgentRequestTool], + List[models.CreateAgentRequestToolTypedDict], ] ] = None, completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, @@ -75,10 +65,10 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentCreationRequest( + request = models.CreateAgentRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTool]] + tools, Optional[List[models.CreateAgentRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -105,7 +95,7 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest + request, False, False, "json", models.CreateAgentRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -123,7 +113,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateAgent", + operation_id="agents_api_v1_agents_create", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -139,17 +129,17 @@ def create( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -159,15 +149,12 @@ async def create_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentcreationrequest.AgentCreationRequestTool], - List[models_agentcreationrequest.AgentCreationRequestToolTypedDict], + List[models.CreateAgentRequestTool], + List[models.CreateAgentRequestToolTypedDict], ] ] = None, completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - 
models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, @@ -206,10 +193,10 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentCreationRequest( + request = models.CreateAgentRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTool]] + tools, Optional[List[models.CreateAgentRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -236,7 +223,7 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest + request, False, False, "json", models.CreateAgentRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -254,7 +241,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateAgent", + operation_id="agents_api_v1_agents_create", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -270,17 +257,17 @@ async def create_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list( self, @@ -288,7 +275,7 @@ def list( page: Optional[int] = 0, page_size: Optional[int] = 20, deployment_chat: OptionalNullable[bool] = UNSET, - sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, search: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, @@ -325,7 +312,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentsRequest( + request = models.AgentsAPIV1AgentsListRequest( page=page, page_size=page_size, deployment_chat=deployment_chat, @@ -365,7 +352,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgents", + operation_id="agents_api_v1_agents_list", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -381,17 +368,17 @@ def list( return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, 
http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -399,7 +386,7 @@ async def list_async( page: Optional[int] = 0, page_size: Optional[int] = 20, deployment_chat: OptionalNullable[bool] = UNSET, - sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, search: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, @@ -436,7 +423,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentsRequest( + request = models.AgentsAPIV1AgentsListRequest( page=page, page_size=page_size, deployment_chat=deployment_chat, @@ -476,7 +463,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgents", + operation_id="agents_api_v1_agents_list", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -492,17 +479,17 @@ async def list_async( return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -510,8 +497,8 @@ def get( agent_id: str, agent_version: OptionalNullable[ Union[ - models_getagentop.GetAgentAgentVersion, - models_getagentop.GetAgentAgentVersionTypedDict, + models.AgentsAPIV1AgentsGetAgentVersion, + models.AgentsAPIV1AgentsGetAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -540,7 +527,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetAgentRequest( + request = models.AgentsAPIV1AgentsGetRequest( agent_id=agent_id, agent_version=agent_version, ) @@ -574,7 +561,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetAgent", + operation_id="agents_api_v1_agents_get", oauth2_scopes=None, 
security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -590,17 +577,17 @@ def get( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -608,8 +595,8 @@ async def get_async( agent_id: str, agent_version: OptionalNullable[ Union[ - models_getagentop.GetAgentAgentVersion, - models_getagentop.GetAgentAgentVersionTypedDict, + models.AgentsAPIV1AgentsGetAgentVersion, + models.AgentsAPIV1AgentsGetAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -638,7 +625,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetAgentRequest( + request = models.AgentsAPIV1AgentsGetRequest( agent_id=agent_id, agent_version=agent_version, ) @@ -672,7 +659,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetAgent", + operation_id="agents_api_v1_agents_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -688,17 +675,17 @@ async def get_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -707,15 +694,12 @@ def update( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentupdaterequest.AgentUpdateRequestTool], - List[models_agentupdaterequest.AgentUpdateRequestToolTypedDict], + List[models.UpdateAgentRequestTool], + List[models.UpdateAgentRequestToolTypedDict], ] ] = None, completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, 
models.CompletionArgsTypedDict] ] = None, model: OptionalNullable[str] = UNSET, name: OptionalNullable[str] = UNSET, @@ -759,12 +743,12 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateAgentRequest( + request = models.AgentsAPIV1AgentsUpdateRequest( agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( + update_agent_request=models.UpdateAgentRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTool]] + tools, Optional[List[models.UpdateAgentRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -793,11 +777,11 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, + request.update_agent_request, False, False, "json", - models.AgentUpdateRequest, + models.UpdateAgentRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -815,7 +799,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateAgent", + operation_id="agents_api_v1_agents_update", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -831,17 +815,17 @@ def update( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -850,15 +834,12 @@ async def update_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentupdaterequest.AgentUpdateRequestTool], - List[models_agentupdaterequest.AgentUpdateRequestToolTypedDict], + List[models.UpdateAgentRequestTool], + List[models.UpdateAgentRequestToolTypedDict], ] ] = None, completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, model: OptionalNullable[str] = UNSET, name: OptionalNullable[str] = UNSET, @@ -902,12 +883,12 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateAgentRequest( + request = models.AgentsAPIV1AgentsUpdateRequest( agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( + update_agent_request=models.UpdateAgentRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTool]] + tools, Optional[List[models.UpdateAgentRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ 
-936,11 +917,11 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, + request.update_agent_request, False, False, "json", - models.AgentUpdateRequest, + models.UpdateAgentRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -958,7 +939,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateAgent", + operation_id="agents_api_v1_agents_update", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -974,17 +955,17 @@ async def update_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -1013,7 +994,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteAgentRequest( + request = models.AgentsAPIV1AgentsDeleteRequest( agent_id=agent_id, ) @@ -1046,7 +1027,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteAgent", + operation_id="agents_api_v1_agents_delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1062,17 +1043,17 @@ def delete( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -1101,7 +1082,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteAgentRequest( + request = models.AgentsAPIV1AgentsDeleteRequest( agent_id=agent_id, ) @@ -1134,7 +1115,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - 
operation_id="DeleteAgent", + operation_id="agents_api_v1_agents_delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1150,17 +1131,17 @@ async def delete_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update_version( self, @@ -1193,7 +1174,7 @@ def update_version( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateAgentVersionRequest( + request = models.AgentsAPIV1AgentsUpdateVersionRequest( agent_id=agent_id, version=version, ) @@ -1227,7 +1208,7 @@ def update_version( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateAgentVersion", + operation_id="agents_api_v1_agents_update_version", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1243,17 +1224,17 @@ def update_version( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_version_async( self, @@ -1286,7 +1267,7 @@ async def update_version_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateAgentVersionRequest( + request = models.AgentsAPIV1AgentsUpdateVersionRequest( agent_id=agent_id, version=version, ) @@ -1320,7 +1301,7 @@ async def update_version_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateAgentVersion", + operation_id="agents_api_v1_agents_update_version", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1336,17 +1317,17 @@ async def update_version_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", 
"application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list_versions( self, @@ -1381,7 +1362,7 @@ def list_versions( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentVersionsRequest( + request = models.AgentsAPIV1AgentsListVersionsRequest( agent_id=agent_id, page=page, page_size=page_size, @@ -1416,7 +1397,7 @@ def list_versions( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgentVersions", + operation_id="agents_api_v1_agents_list_versions", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1432,17 +1413,17 @@ def list_versions( return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_versions_async( self, @@ -1477,7 +1458,7 @@ async def list_versions_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentVersionsRequest( + request = models.AgentsAPIV1AgentsListVersionsRequest( agent_id=agent_id, page=page, page_size=page_size, @@ -1512,7 +1493,7 @@ async def list_versions_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgentVersions", + operation_id="agents_api_v1_agents_list_versions", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1528,17 +1509,17 @@ async def list_versions_async( return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, 
http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get_version( self, @@ -1571,7 +1552,7 @@ def get_version( else: base_url = self._get_url(base_url, url_variables) - request = models.GetAgentVersionRequest( + request = models.AgentsAPIV1AgentsGetVersionRequest( agent_id=agent_id, version=version, ) @@ -1605,7 +1586,7 @@ def get_version( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetAgentVersion", + operation_id="agents_api_v1_agents_get_version", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1621,17 +1602,17 @@ def get_version( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_version_async( self, @@ -1664,7 +1645,7 @@ async def get_version_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetAgentVersionRequest( + request = models.AgentsAPIV1AgentsGetVersionRequest( agent_id=agent_id, version=version, ) @@ -1698,7 +1679,7 @@ async def get_version_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetAgentVersion", + operation_id="agents_api_v1_agents_get_version", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1714,17 +1695,17 @@ async def get_version_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def create_version_alias( self, @@ -1759,7 +1740,7 @@ def create_version_alias( else: base_url = self._get_url(base_url, url_variables) - request = models.CreateOrUpdateAgentAliasRequest( + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( agent_id=agent_id, alias=alias, version=version, @@ -1794,7 +1775,7 @@ def create_version_alias( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateOrUpdateAgentAlias", + operation_id="agents_api_v1_agents_create_or_update_alias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1810,17 +1791,17 @@ def create_version_alias( return unmarshal_json_response(models.AgentAliasResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_version_alias_async( self, @@ -1855,7 +1836,7 @@ async def create_version_alias_async( else: base_url = self._get_url(base_url, url_variables) - request = models.CreateOrUpdateAgentAliasRequest( + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( agent_id=agent_id, alias=alias, version=version, @@ -1890,7 +1871,7 @@ async def create_version_alias_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateOrUpdateAgentAlias", + operation_id="agents_api_v1_agents_create_or_update_alias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1906,17 +1887,17 @@ async def create_version_alias_async( return unmarshal_json_response(models.AgentAliasResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API 
error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list_version_aliases( self, @@ -1947,7 +1928,7 @@ def list_version_aliases( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentAliasesRequest( + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( agent_id=agent_id, ) @@ -1980,7 +1961,7 @@ def list_version_aliases( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgentAliases", + operation_id="agents_api_v1_agents_list_version_aliases", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1996,17 +1977,17 @@ def list_version_aliases( return unmarshal_json_response(List[models.AgentAliasResponse], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_version_aliases_async( self, @@ -2037,7 +2018,7 @@ async def list_version_aliases_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentAliasesRequest( + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( agent_id=agent_id, ) @@ -2070,7 +2051,7 @@ async def list_version_aliases_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgentAliases", + operation_id="agents_api_v1_agents_list_version_aliases", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2086,17 +2067,17 @@ async def list_version_aliases_async( return unmarshal_json_response(List[models.AgentAliasResponse], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete_version_alias( self, @@ -2129,7 
+2110,7 @@ def delete_version_alias( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteAgentAliasRequest( + request = models.AgentsAPIV1AgentsDeleteAliasRequest( agent_id=agent_id, alias=alias, ) @@ -2163,7 +2144,7 @@ def delete_version_alias( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteAgentAlias", + operation_id="agents_api_v1_agents_delete_alias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2179,17 +2160,17 @@ def delete_version_alias( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_version_alias_async( self, @@ -2222,7 +2203,7 @@ async def delete_version_alias_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteAgentAliasRequest( + request = models.AgentsAPIV1AgentsDeleteAliasRequest( agent_id=agent_id, alias=alias, ) @@ -2256,7 +2237,7 @@ async def delete_version_alias_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteAgentAlias", + operation_id="agents_api_v1_agents_delete_alias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2272,14 +2253,14 @@ async def delete_version_alias_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 35698d32..13b9c01f 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -2,16 +2,8 @@ # @generated-id: 7eba0f088d47 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext 
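The hunks above move every exception raised in src/mistralai/mistral_agents.py from the models namespace to the dedicated errors module, rename the request models to their AgentsAPIV1Agents* forms, and switch operation ids to snake_case. A minimal usage sketch of the version-alias endpoints under the relocated error classes; the client.beta.agents access path, the top-level import of errors, and the example ids are assumptions for illustration, not confirmed by this patch:

# Sketch only: access path and import location for `errors` are assumed.
import os
from mistralai import Mistral, errors

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

try:
    # Point the "production" alias at version 2 of an agent, list aliases, drop one.
    client.beta.agents.create_version_alias(
        agent_id="ag_0123", alias="production", version=2
    )
    aliases = client.beta.agents.list_version_aliases(agent_id="ag_0123")
    client.beta.agents.delete_version_alias(agent_id="ag_0123", alias="staging")
except errors.HTTPValidationError as exc:  # previously models.HTTPValidationError
    print(exc)
except errors.SDKError as exc:  # previously models.SDKError
    print(exc)

The same try/except shape applies to every operation in this file, since the generated 422/4XX/5XX handling now raises from errors rather than models.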
-from mistralai.client.models import ( - chatcompletionrequest as models_chatcompletionrequest, - chatcompletionstreamrequest as models_chatcompletionstreamrequest, - mistralpromptmode as models_mistralpromptmode, - prediction as models_prediction, - responseformat as models_responseformat, - tool as models_tool, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -111,8 +103,8 @@ def complete( *, model: str, messages: Union[ - List[models_chatcompletionrequest.ChatCompletionRequestMessage], - List[models_chatcompletionrequest.ChatCompletionRequestMessageTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -120,37 +112,32 @@ def complete( stream: Optional[bool] = False, stop: Optional[ Union[ - models_chatcompletionrequest.ChatCompletionRequestStop, - models_chatcompletionrequest.ChatCompletionRequestStopTypedDict, + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_chatcompletionrequest.ChatCompletionRequestToolChoice, - models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -271,25 +258,25 @@ def complete( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise 
errors.SDKError("Unexpected response received", http_res) async def complete_async( self, *, model: str, messages: Union[ - List[models_chatcompletionrequest.ChatCompletionRequestMessage], - List[models_chatcompletionrequest.ChatCompletionRequestMessageTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -297,37 +284,32 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[ Union[ - models_chatcompletionrequest.ChatCompletionRequestStop, - models_chatcompletionrequest.ChatCompletionRequestStopTypedDict, + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_chatcompletionrequest.ChatCompletionRequestToolChoice, - models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -448,27 +430,25 @@ async def complete_async( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, *, model: str, messages: Union[ - List[models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessage], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessageTypedDict - ], + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: 
Optional[float] = None, @@ -476,37 +456,32 @@ def stream( stream: Optional[bool] = True, stop: Optional[ Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -636,28 +611,26 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, *, model: str, messages: Union[ - List[models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessage], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessageTypedDict - ], + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -665,37 +638,32 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[ Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, + 
models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -825,15 +793,15 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py index 3407c4b7..67199b60 100644 --- a/src/mistralai/client/classifiers.py +++ b/src/mistralai/client/classifiers.py @@ -2,13 +2,8 @@ # @generated-id: 26e773725732 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - chatmoderationrequest as models_chatmoderationrequest, - classificationrequest as models_classificationrequest, - inputs as models_inputs, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -23,8 +18,8 @@ def moderate( *, model: str, inputs: Union[ - 
models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -106,25 +101,25 @@ def moderate( return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def moderate_async( self, *, model: str, inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -206,24 +201,24 @@ async def moderate_async( return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def moderate_chat( self, *, inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs3, - models_chatmoderationrequest.ChatModerationRequestInputs3TypedDict, + models.ChatModerationRequestInputs3, + models.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -305,24 +300,24 @@ def moderate_chat( return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def moderate_chat_async( self, *, inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs3, - models_chatmoderationrequest.ChatModerationRequestInputs3TypedDict, + models.ChatModerationRequestInputs3, + models.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -404,25 +399,25 @@ async def moderate_chat_async( return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def classify( self, *, model: str, inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -504,25 +499,25 @@ def classify( return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def classify_async( self, *, model: str, inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - 
models_classificationrequest.ClassificationRequestInputsTypedDict, + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -604,23 +599,23 @@ async def classify_async( return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def classify_chat( self, *, model: str, - inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], + input: Union[models.Inputs, models.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -629,7 +624,7 @@ def classify_chat( r"""Chat Classifications :param model: - :param inputs: Chat to classify + :param input: Chat to classify :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -647,7 +642,7 @@ def classify_chat( request = models.ChatClassificationRequest( model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), + input=utils.get_pydantic_model(input, models.Inputs), ) req = self._build_request( @@ -698,23 +693,23 @@ def classify_chat( return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def classify_chat_async( self, *, model: str, - inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], + input: Union[models.Inputs, models.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = 
None, @@ -723,7 +718,7 @@ async def classify_chat_async( r"""Chat Classifications :param model: - :param inputs: Chat to classify + :param input: Chat to classify :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -741,7 +736,7 @@ async def classify_chat_async( request = models.ChatClassificationRequest( model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), + input=utils.get_pydantic_model(input, models.Inputs), ) req = self._build_request_async( @@ -792,14 +787,14 @@ async def classify_chat_async( return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index 646b91f3..ec33b1fb 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -2,18 +2,8 @@ # @generated-id: 40692a878064 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - completionargs as models_completionargs, - conversationappendrequest as models_conversationappendrequest, - conversationappendstreamrequest as models_conversationappendstreamrequest, - conversationinputs as models_conversationinputs, - conversationrequest as models_conversationrequest, - conversationrestartrequest as models_conversationrestartrequest, - conversationrestartstreamrequest as models_conversationrestartstreamrequest, - conversationstreamrequest as models_conversationstreamrequest, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -238,27 +228,21 @@ async def run_generator() -> ( def start( self, *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationrequest.ConversationRequestHandoffExecution + models.ConversationRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationrequest.ConversationRequestTool], - 
List[models_conversationrequest.ConversationRequestToolTypedDict], + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -266,8 +250,8 @@ def start( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrequest.ConversationRequestAgentVersion, - models_conversationrequest.ConversationRequestAgentVersionTypedDict, + models.ConversationRequestAgentVersion, + models.ConversationRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -360,7 +344,7 @@ def start( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartConversation", + operation_id="agents_api_v1_conversations_start", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -376,42 +360,36 @@ def start( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def start_async( self, *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationrequest.ConversationRequestHandoffExecution + models.ConversationRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationrequest.ConversationRequestTool], - List[models_conversationrequest.ConversationRequestToolTypedDict], + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -419,8 +397,8 @@ async def start_async( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrequest.ConversationRequestAgentVersion, - models_conversationrequest.ConversationRequestAgentVersionTypedDict, + models.ConversationRequestAgentVersion, + 
models.ConversationRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -513,7 +491,7 @@ async def start_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartConversation", + operation_id="agents_api_v1_conversations_start", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -529,17 +507,17 @@ async def start_async( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list( self, @@ -551,7 +529,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ListConversationsResponse]: + ) -> List[models.AgentsAPIV1ConversationsListResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. 
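
The hunks above move the 422/4XX/5XX error types out of `models` and into the new `errors` module, so callers that previously caught `models.HTTPValidationError` or `models.SDKError` need to update their imports. A minimal sketch of the new error handling follows; it assumes an SDK instance named `client` that exposes this conversations client as `client.conversations` and accepts a plain string for `inputs` (both assumptions, not guaranteed by this patch):

```python
# Illustrative only; `client`, the `client.conversations` accessor, and the agent id
# are assumptions, not taken from the generated code.
from mistralai.client import errors

try:
    conversation = client.conversations.start(
        agent_id="ag_0123456789",   # hypothetical agent id
        inputs="Hello, agent!",     # ConversationInputs also accepts structured entries
    )
except errors.HTTPValidationError as err:
    # 422 responses now raise errors.HTTPValidationError (was models.HTTPValidationError)
    print("validation failed:", err)
except errors.SDKError as err:
    # other 4XX/5XX responses and unexpected responses now raise errors.SDKError
    print("API error:", err)
```
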
@@ -574,7 +552,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListConversationsRequest( + request = models.AgentsAPIV1ConversationsListRequest( page=page, page_size=page_size, metadata=metadata, @@ -609,7 +587,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListConversations", + operation_id="agents_api_v1_conversations_list", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -623,21 +601,21 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - List[models.ListConversationsResponse], http_res + List[models.AgentsAPIV1ConversationsListResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -649,7 +627,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ListConversationsResponse]: + ) -> List[models.AgentsAPIV1ConversationsListResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. 
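
The list hunks also rename the request and response models: `ListConversationsRequest` becomes `AgentsAPIV1ConversationsListRequest` and the return type becomes `List[models.AgentsAPIV1ConversationsListResponse]`. A short, hedged sketch of calling code after the rename (the `client.conversations` accessor is again an assumption):

```python
# Sketch under the same assumptions as above; not part of the generated patch.
from typing import List

from mistralai.client import models

convs: List[models.AgentsAPIV1ConversationsListResponse] = client.conversations.list(
    page=0,
    page_size=20,
)
for conv in convs:
    print(conv)
```
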
@@ -672,7 +650,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListConversationsRequest( + request = models.AgentsAPIV1ConversationsListRequest( page=page, page_size=page_size, metadata=metadata, @@ -707,7 +685,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListConversations", + operation_id="agents_api_v1_conversations_list", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -721,21 +699,21 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - List[models.ListConversationsResponse], http_res + List[models.AgentsAPIV1ConversationsListResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -766,7 +744,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationRequest( + request = models.AgentsAPIV1ConversationsGetRequest( conversation_id=conversation_id, ) @@ -799,7 +777,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversation", + operation_id="agents_api_v1_conversations_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -815,17 +793,17 @@ def get( return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -856,7 +834,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationRequest( + request = 
models.AgentsAPIV1ConversationsGetRequest( conversation_id=conversation_id, ) @@ -889,7 +867,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversation", + operation_id="agents_api_v1_conversations_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -905,17 +883,17 @@ async def get_async( return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -946,7 +924,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteConversationRequest( + request = models.AgentsAPIV1ConversationsDeleteRequest( conversation_id=conversation_id, ) @@ -979,7 +957,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteConversation", + operation_id="agents_api_v1_conversations_delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -995,17 +973,17 @@ def delete( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -1036,7 +1014,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteConversationRequest( + request = models.AgentsAPIV1ConversationsDeleteRequest( conversation_id=conversation_id, ) @@ -1069,7 +1047,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteConversation", + operation_id="agents_api_v1_conversations_delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ 
-1085,37 +1063,39 @@ async def delete_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def append( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationappendrequest.ConversationAppendRequestHandoffExecution + models.ConversationAppendRequestHandoffExecution ] = "server", completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], ] - ] = None, + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1131,6 +1111,7 @@ def append( :param store: Whether to store the results into our servers or not. 
:param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1146,16 +1127,22 @@ def append( else: base_url = self._get_url(base_url, url_variables) - request = models.AppendConversationRequest( + request = models.AgentsAPIV1ConversationsAppendRequest( conversation_id=conversation_id, conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), ), ) @@ -1195,7 +1182,7 @@ def append( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="AppendConversation", + operation_id="agents_api_v1_conversations_append", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1211,37 +1198,39 @@ def append( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def append_async( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationappendrequest.ConversationAppendRequestHandoffExecution + models.ConversationAppendRequestHandoffExecution ] = "server", completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], ] - ] = None, + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1257,6 +1246,7 @@ async def append_async( :param store: Whether to store the results into our servers or not. 
:param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1272,16 +1262,22 @@ async def append_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AppendConversationRequest( + request = models.AgentsAPIV1ConversationsAppendRequest( conversation_id=conversation_id, conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), ), ) @@ -1321,7 +1317,7 @@ async def append_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="AppendConversation", + operation_id="agents_api_v1_conversations_append", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1337,17 +1333,17 @@ async def append_async( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get_history( self, @@ -1378,7 +1374,7 @@ def get_history( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationHistoryRequest( + request = models.AgentsAPIV1ConversationsHistoryRequest( conversation_id=conversation_id, ) @@ -1411,7 +1407,7 @@ def get_history( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversationHistory", + operation_id="agents_api_v1_conversations_history", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1427,17 +1423,17 @@ def get_history( return unmarshal_json_response(models.ConversationHistory, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_history_async( self, @@ -1468,7 +1464,7 @@ async def get_history_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationHistoryRequest( + request = models.AgentsAPIV1ConversationsHistoryRequest( conversation_id=conversation_id, ) @@ -1501,7 +1497,7 @@ async def get_history_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversationHistory", + operation_id="agents_api_v1_conversations_history", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1517,17 +1513,17 @@ async def get_history_async( return unmarshal_json_response(models.ConversationHistory, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get_messages( self, @@ -1558,7 +1554,7 @@ def get_messages( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationMessagesRequest( + request = models.AgentsAPIV1ConversationsMessagesRequest( conversation_id=conversation_id, ) @@ -1591,7 +1587,7 @@ def get_messages( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversationMessages", + operation_id="agents_api_v1_conversations_messages", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1607,17 +1603,17 @@ def get_messages( return unmarshal_json_response(models.ConversationMessages, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_messages_async( self, @@ -1648,7 +1644,7 @@ async def get_messages_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationMessagesRequest( + request = models.AgentsAPIV1ConversationsMessagesRequest( conversation_id=conversation_id, ) @@ -1681,7 +1677,7 @@ async def get_messages_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversationMessages", + operation_id="agents_api_v1_conversations_messages", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1697,43 +1693,39 @@ async def get_messages_async( return unmarshal_json_response(models.ConversationMessages, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def restart( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationrestartrequest.ConversationRestartRequestHandoffExecution + models.ConversationRestartRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrestartrequest.ConversationRestartRequestAgentVersion, - models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + models.ConversationRestartRequestAgentVersion, + models.ConversationRestartRequestAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -1746,8 +1738,8 @@ def restart( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. :param conversation_id: ID of the original conversation which is being restarted. 
- :param inputs: :param from_entry_id: + :param inputs: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: @@ -1769,10 +1761,12 @@ def restart( else: base_url = self._get_url(base_url, url_variables) - request = models.RestartConversationRequest( + request = models.AgentsAPIV1ConversationsRestartRequest( conversation_id=conversation_id, conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, @@ -1821,7 +1815,7 @@ def restart( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RestartConversation", + operation_id="agents_api_v1_conversations_restart", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1837,43 +1831,39 @@ def restart( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def restart_async( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationrestartrequest.ConversationRestartRequestHandoffExecution + models.ConversationRestartRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrestartrequest.ConversationRestartRequestAgentVersion, - models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + models.ConversationRestartRequestAgentVersion, + models.ConversationRestartRequestAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -1886,8 +1876,8 @@ async def restart_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. :param conversation_id: ID of the original conversation which is being restarted. 
- :param inputs: :param from_entry_id: + :param inputs: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: @@ -1909,10 +1899,12 @@ async def restart_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RestartConversationRequest( + request = models.AgentsAPIV1ConversationsRestartRequest( conversation_id=conversation_id, conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, @@ -1961,7 +1953,7 @@ async def restart_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RestartConversation", + operation_id="agents_api_v1_conversations_restart", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1977,44 +1969,36 @@ async def restart_async( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def start_stream( self, *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], stream: Optional[bool] = True, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationstreamrequest.ConversationStreamRequestHandoffExecution + models.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTool], - List[ - models_conversationstreamrequest.ConversationStreamRequestToolTypedDict - ], + List[models.ConversationStreamRequestTool], + List[models.ConversationStreamRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -2022,8 +2006,8 @@ def start_stream( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationstreamrequest.ConversationStreamRequestAgentVersion, - models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + models.ConversationStreamRequestAgentVersion, + 
models.ConversationStreamRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -2116,7 +2100,7 @@ def start_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartConversationStream", + operation_id="agents_api_v1_conversations_start_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2138,45 +2122,37 @@ def start_stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def start_stream_async( self, *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], stream: Optional[bool] = True, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationstreamrequest.ConversationStreamRequestHandoffExecution + models.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTool], - List[ - models_conversationstreamrequest.ConversationStreamRequestToolTypedDict - ], + List[models.ConversationStreamRequestTool], + List[models.ConversationStreamRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -2184,8 +2160,8 @@ async def start_stream_async( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationstreamrequest.ConversationStreamRequestAgentVersion, - models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + models.ConversationStreamRequestAgentVersion, + models.ConversationStreamRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -2278,7 +2254,7 @@ async def start_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartConversationStream", + operation_id="agents_api_v1_conversations_start_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2300,38 +2276,40 @@ async def start_stream_async( if 
utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def append_stream( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution + models.ConversationAppendStreamRequestHandoffExecution ] = "server", completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], ] - ] = None, + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2347,6 +2325,7 @@ def append_stream( :param store: Whether to store the results into our servers or not. 
:param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -2362,16 +2341,22 @@ def append_stream( else: base_url = self._get_url(base_url, url_variables) - request = models.AppendConversationStreamRequest( + request = models.AgentsAPIV1ConversationsAppendStreamRequest( conversation_id=conversation_id, conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), ), ) @@ -2411,7 +2396,7 @@ def append_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="AppendConversationStream", + operation_id="agents_api_v1_conversations_append_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2433,38 +2418,40 @@ def append_stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def append_stream_async( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution + models.ConversationAppendStreamRequestHandoffExecution ] = "server", completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], ] - ] = None, + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = 
UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2480,6 +2467,7 @@ async def append_stream_async( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -2495,16 +2483,22 @@ async def append_stream_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AppendConversationStreamRequest( + request = models.AgentsAPIV1ConversationsAppendStreamRequest( conversation_id=conversation_id, conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), ), ) @@ -2544,7 +2538,7 @@ async def append_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="AppendConversationStream", + operation_id="agents_api_v1_conversations_append_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2566,44 +2560,40 @@ async def append_stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def restart_stream( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution + models.ConversationRestartStreamRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - 
models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + models.ConversationRestartStreamRequestAgentVersion, + models.ConversationRestartStreamRequestAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -2616,8 +2606,8 @@ def restart_stream( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: :param from_entry_id: + :param inputs: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: @@ -2639,10 +2629,12 @@ def restart_stream( else: base_url = self._get_url(base_url, url_variables) - request = models.RestartConversationStreamRequest( + request = models.AgentsAPIV1ConversationsRestartStreamRequest( conversation_id=conversation_id, conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, @@ -2691,7 +2683,7 @@ def restart_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RestartConversationStream", + operation_id="agents_api_v1_conversations_restart_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2713,44 +2705,40 @@ def restart_stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def restart_stream_async( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution + 
models.ConversationRestartStreamRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + models.ConversationRestartStreamRequestAgentVersion, + models.ConversationRestartStreamRequestAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -2763,8 +2751,8 @@ async def restart_stream_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: :param from_entry_id: + :param inputs: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: @@ -2786,10 +2774,12 @@ async def restart_stream_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RestartConversationStreamRequest( + request = models.AgentsAPIV1ConversationsRestartStreamRequest( conversation_id=conversation_id, conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, @@ -2838,7 +2828,7 @@ async def restart_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RestartConversationStream", + operation_id="agents_api_v1_conversations_restart_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2860,15 +2850,15 @@ async def restart_stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py index c78f2944..b3130364 100644 --- a/src/mistralai/client/documents.py +++ b/src/mistralai/client/documents.py @@ -2,12 +2,8 @@ # @generated-id: bcc17286c31c from 
.basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - documentupdatein as models_documentupdatein, - file as models_file, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -31,7 +27,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: + ) -> models.ListDocumentsResponse: r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. @@ -58,7 +54,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListDocumentsRequest( + request = models.LibrariesDocumentsListV1Request( library_id=library_id, search=search, page_size=page_size, @@ -97,7 +93,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListDocuments", + operation_id="libraries_documents_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -110,20 +106,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListDocumentOut, http_res) + return unmarshal_json_response(models.ListDocumentsResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -139,7 +135,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: + ) -> models.ListDocumentsResponse: r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. 
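As a usage aid alongside this hunk: `Documents.list()` now builds a `LibrariesDocumentsListV1Request` and returns `models.ListDocumentsResponse` instead of `ListDocumentOut`. A minimal sketch follows; the `Mistral(api_key=...)` constructor is the SDK's usual entry point, while the `client.beta.libraries.documents` accessor path is an assumption that does not appear in this diff.

```python
import os

from mistralai import Mistral
from mistralai.client import models

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Hypothetical accessor path; only the list() signature and the renamed
# models.ListDocumentsResponse return type come from this diff.
docs: models.ListDocumentsResponse = client.beta.libraries.documents.list(
    library_id="lib_123",  # placeholder ID
    page_size=50,
)
print(docs)
```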
@@ -166,7 +162,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListDocumentsRequest( + request = models.LibrariesDocumentsListV1Request( library_id=library_id, search=search, page_size=page_size, @@ -205,7 +201,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListDocuments", + operation_id="libraries_documents_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -218,31 +214,31 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListDocumentOut, http_res) + return unmarshal_json_response(models.ListDocumentsResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def upload( self, *, library_id: str, - file: Union[models_file.File, models_file.FileTypedDict], + file: Union[models.File, models.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Upload a new document. Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. 
The processing has to be completed in order be discoverable for the library search @@ -272,7 +268,7 @@ def upload( else: base_url = self._get_url(base_url, url_variables) - request = models.UploadDocumentRequest( + request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), @@ -311,7 +307,7 @@ def upload( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UploadDocument", + operation_id="libraries_documents_upload_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -324,31 +320,31 @@ def upload( response_data: Any = None if utils.match_response(http_res, ["200", "201"], "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def upload_async( self, *, library_id: str, - file: Union[models_file.File, models_file.FileTypedDict], + file: Union[models.File, models.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Upload a new document. Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. 
The processing has to be completed in order be discoverable for the library search @@ -378,7 +374,7 @@ async def upload_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UploadDocumentRequest( + request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), @@ -417,7 +413,7 @@ async def upload_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UploadDocument", + operation_id="libraries_documents_upload_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -430,20 +426,20 @@ async def upload_async( response_data: Any = None if utils.match_response(http_res, ["200", "201"], "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -454,7 +450,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Retrieve the metadata of a specific document. Given a library and a document in this library, you can retrieve the metadata of that document. 
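Before the `get()` hunks: both `upload()` variants above now return `models.Document` (previously `DocumentOut`) and wrap their parameters in `LibrariesDocumentsUploadV1Request`. A hedged sketch, assuming the same hypothetical accessor path as above and assuming `models.File` carries `file_name`/`content` fields (the field names are not shown in this hunk):

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

with open("report.pdf", "rb") as fh:
    # Hypothetical accessor path and File field names; the diff only shows
    # that upload() accepts a models.File/FileTypedDict union and now
    # returns models.Document.
    doc = client.beta.libraries.documents.upload(
        library_id="lib_123",  # placeholder ID
        file={"file_name": "report.pdf", "content": fh.read()},
    )

# Per the docstring, the document is queued for processing and only becomes
# discoverable by library search once processing completes.
print(doc)
```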
@@ -476,7 +472,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentRequest( + request = models.LibrariesDocumentsGetV1Request( library_id=library_id, document_id=document_id, ) @@ -510,7 +506,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocument", + operation_id="libraries_documents_get_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -523,20 +519,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -547,7 +543,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Retrieve the metadata of a specific document. Given a library and a document in this library, you can retrieve the metadata of that document. 
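A recurring change in every method body here is that `HTTPValidationError` and `SDKError` are now raised from `mistralai.client.errors` rather than `mistralai.client.models`, so callers that catch them by import path need to update. A sketch of the migrated handling, using `get()` and the same hypothetical accessor assumptions as above:

```python
import os

from mistralai import Mistral
from mistralai.client import errors

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

try:
    doc = client.beta.libraries.documents.get(  # hypothetical accessor path
        library_id="lib_123",
        document_id="doc_456",
    )
    print(doc)
except errors.HTTPValidationError as exc:
    # 422 responses: previously raised as models.HTTPValidationError.
    print("validation error:", exc)
except errors.SDKError as exc:
    # 4XX/5XX and unexpected responses: previously models.SDKError.
    print("API error:", exc)
```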
@@ -569,7 +565,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentRequest( + request = models.LibrariesDocumentsGetV1Request( library_id=library_id, document_id=document_id, ) @@ -603,7 +599,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocument", + operation_id="libraries_documents_get_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -616,20 +612,20 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -638,16 +634,13 @@ def update( document_id: str, name: OptionalNullable[str] = UNSET, attributes: OptionalNullable[ - Union[ - Dict[str, models_documentupdatein.Attributes], - Dict[str, models_documentupdatein.AttributesTypedDict], - ] + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Update the metadata of a specific document. Given a library and a document in that library, update the name of that document. 
@@ -671,10 +664,10 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateDocumentRequest( + request = models.LibrariesDocumentsUpdateV1Request( library_id=library_id, document_id=document_id, - document_update_in=models.DocumentUpdateIn( + update_document_request=models.UpdateDocumentRequest( name=name, attributes=attributes, ), @@ -694,11 +687,11 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, + request.update_document_request, False, False, "json", - models.DocumentUpdateIn, + models.UpdateDocumentRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -716,7 +709,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateDocument", + operation_id="libraries_documents_update_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -729,20 +722,20 @@ def update( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -751,16 +744,13 @@ async def update_async( document_id: str, name: OptionalNullable[str] = UNSET, attributes: OptionalNullable[ - Union[ - Dict[str, models_documentupdatein.Attributes], - Dict[str, models_documentupdatein.AttributesTypedDict], - ] + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Update the metadata of a specific document. Given a library and a document in that library, update the name of that document. 
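For `update()`, the request wrapper becomes `LibrariesDocumentsUpdateV1Request`, the serialized body model is now `UpdateDocumentRequest` (built from the `name`/`attributes` keywords), and the call returns `models.Document`. The caller-facing keywords are unchanged; a sketch under the same assumptions:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Hypothetical accessor path; the keyword names (name, attributes) come from
# the update() signature in this diff, the attribute values are placeholders.
doc = client.beta.libraries.documents.update(
    library_id="lib_123",
    document_id="doc_456",
    name="q3-report.pdf",
    attributes={"team": "finance"},
)
print(doc)
```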
@@ -784,10 +774,10 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateDocumentRequest( + request = models.LibrariesDocumentsUpdateV1Request( library_id=library_id, document_id=document_id, - document_update_in=models.DocumentUpdateIn( + update_document_request=models.UpdateDocumentRequest( name=name, attributes=attributes, ), @@ -807,11 +797,11 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, + request.update_document_request, False, False, "json", - models.DocumentUpdateIn, + models.UpdateDocumentRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -829,7 +819,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateDocument", + operation_id="libraries_documents_update_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -842,20 +832,20 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -888,7 +878,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteDocumentRequest( + request = models.LibrariesDocumentsDeleteV1Request( library_id=library_id, document_id=document_id, ) @@ -922,7 +912,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteDocument", + operation_id="libraries_documents_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -938,17 +928,17 @@ def delete( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, 
http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -981,7 +971,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteDocumentRequest( + request = models.LibrariesDocumentsDeleteV1Request( library_id=library_id, document_id=document_id, ) @@ -1015,7 +1005,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteDocument", + operation_id="libraries_documents_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1031,17 +1021,17 @@ async def delete_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def text_content( self, @@ -1074,7 +1064,7 @@ def text_content( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentTextContentRequest( + request = models.LibrariesDocumentsGetTextContentV1Request( library_id=library_id, document_id=document_id, ) @@ -1108,7 +1098,7 @@ def text_content( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentTextContent", + operation_id="libraries_documents_get_text_content_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1124,17 +1114,17 @@ def text_content( return unmarshal_json_response(models.DocumentTextContent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def text_content_async( self, @@ -1167,7 +1157,7 @@ async def text_content_async( else: base_url = 
self._get_url(base_url, url_variables) - request = models.GetDocumentTextContentRequest( + request = models.LibrariesDocumentsGetTextContentV1Request( library_id=library_id, document_id=document_id, ) @@ -1201,7 +1191,7 @@ async def text_content_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentTextContent", + operation_id="libraries_documents_get_text_content_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1217,17 +1207,17 @@ async def text_content_async( return unmarshal_json_response(models.DocumentTextContent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def status( self, @@ -1260,7 +1250,7 @@ def status( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentStatusRequest( + request = models.LibrariesDocumentsGetStatusV1Request( library_id=library_id, document_id=document_id, ) @@ -1294,7 +1284,7 @@ def status( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentStatus", + operation_id="libraries_documents_get_status_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1310,17 +1300,17 @@ def status( return unmarshal_json_response(models.ProcessingStatusOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def status_async( self, @@ -1353,7 +1343,7 @@ async def status_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentStatusRequest( + request = models.LibrariesDocumentsGetStatusV1Request( library_id=library_id, document_id=document_id, ) @@ -1387,7 +1377,7 @@ async def status_async( hook_ctx=HookContext( 
config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentStatus", + operation_id="libraries_documents_get_status_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1403,17 +1393,17 @@ async def status_async( return unmarshal_json_response(models.ProcessingStatusOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get_signed_url( self, @@ -1446,7 +1436,7 @@ def get_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentSignedURLRequest( + request = models.LibrariesDocumentsGetSignedURLV1Request( library_id=library_id, document_id=document_id, ) @@ -1480,7 +1470,7 @@ def get_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentSignedUrl", + operation_id="libraries_documents_get_signed_url_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1496,17 +1486,17 @@ def get_signed_url( return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_signed_url_async( self, @@ -1539,7 +1529,7 @@ async def get_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentSignedURLRequest( + request = models.LibrariesDocumentsGetSignedURLV1Request( library_id=library_id, document_id=document_id, ) @@ -1573,7 +1563,7 @@ async def get_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentSignedUrl", + operation_id="libraries_documents_get_signed_url_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, 
models.Security @@ -1589,17 +1579,17 @@ async def get_signed_url_async( return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def extracted_text_signed_url( self, @@ -1632,7 +1622,7 @@ def extracted_text_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentExtractedTextSignedURLRequest( + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( library_id=library_id, document_id=document_id, ) @@ -1666,7 +1656,7 @@ def extracted_text_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentExtractedTextSignedUrl", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1682,17 +1672,17 @@ def extracted_text_signed_url( return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def extracted_text_signed_url_async( self, @@ -1725,7 +1715,7 @@ async def extracted_text_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentExtractedTextSignedURLRequest( + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( library_id=library_id, document_id=document_id, ) @@ -1759,7 +1749,7 @@ async def extracted_text_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentExtractedTextSignedUrl", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1775,17 +1765,17 @@ async def 
extracted_text_signed_url_async( return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def reprocess( self, @@ -1818,7 +1808,7 @@ def reprocess( else: base_url = self._get_url(base_url, url_variables) - request = models.ReprocessDocumentRequest( + request = models.LibrariesDocumentsReprocessV1Request( library_id=library_id, document_id=document_id, ) @@ -1852,7 +1842,7 @@ def reprocess( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ReprocessDocument", + operation_id="libraries_documents_reprocess_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1868,17 +1858,17 @@ def reprocess( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def reprocess_async( self, @@ -1911,7 +1901,7 @@ async def reprocess_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ReprocessDocumentRequest( + request = models.LibrariesDocumentsReprocessV1Request( library_id=library_id, document_id=document_id, ) @@ -1945,7 +1935,7 @@ async def reprocess_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ReprocessDocument", + operation_id="libraries_documents_reprocess_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1961,14 +1951,14 @@ async def reprocess_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py index 4a056baa..5f9d3b9c 100644 --- a/src/mistralai/client/embeddings.py +++ b/src/mistralai/client/embeddings.py @@ -2,13 +2,8 @@ # @generated-id: f9c17258207e from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - embeddingdtype as models_embeddingdtype, - embeddingrequest as models_embeddingrequest, - encodingformat as models_encodingformat, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -23,13 +18,12 @@ def create( *, model: str, inputs: Union[ - models_embeddingrequest.EmbeddingRequestInputs, - models_embeddingrequest.EmbeddingRequestInputsTypedDict, + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, - encoding_format: Optional[models_encodingformat.EncodingFormat] = None, + output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -117,30 +111,29 @@ def create( return unmarshal_json_response(models.EmbeddingResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, *, model: str, inputs: Union[ - models_embeddingrequest.EmbeddingRequestInputs, - models_embeddingrequest.EmbeddingRequestInputsTypedDict, + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, output_dimension: OptionalNullable[int] = UNSET, - output_dtype: 
Optional[models_embeddingdtype.EmbeddingDtype] = None, - encoding_format: Optional[models_encodingformat.EncodingFormat] = None, + output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -228,14 +221,14 @@ async def create_async( return unmarshal_json_response(models.EmbeddingResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/errors/__init__.py b/src/mistralai/client/errors/__init__.py new file mode 100644 index 00000000..58a591a1 --- /dev/null +++ b/src/mistralai/client/errors/__init__.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0b2db51246df + +from .mistralerror import MistralError +from typing import Any, TYPE_CHECKING + +from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/models/httpvalidationerror.py b/src/mistralai/client/errors/httpvalidationerror.py similarity index 75% rename from src/mistralai/client/models/httpvalidationerror.py rename to src/mistralai/client/errors/httpvalidationerror.py index e7f0a35b..97b16562 100644 --- a/src/mistralai/client/models/httpvalidationerror.py +++ b/src/mistralai/client/errors/httpvalidationerror.py @@ -1,17 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 4099f568a6f8 +# @generated-id: ac3de4a52bb6 from __future__ import annotations -from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai.client.models import MistralError +from mistralai.client.errors import MistralError +from mistralai.client.models import validationerror as models_validationerror from mistralai.client.types import BaseModel from typing import List, Optional class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None + detail: Optional[List[models_validationerror.ValidationError]] = None @dataclass(unsafe_hash=True) diff --git a/src/mistralai/client/models/mistralerror.py b/src/mistralai/client/errors/mistralerror.py similarity index 96% rename from src/mistralai/client/models/mistralerror.py rename to src/mistralai/client/errors/mistralerror.py index 862a6be8..eb73040c 100644 --- a/src/mistralai/client/models/mistralerror.py +++ b/src/mistralai/client/errors/mistralerror.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 68ffd8394c2e +# @generated-id: d1f57f0ff1e9 import httpx from typing import Optional diff --git a/src/mistralai/client/models/no_response_error.py b/src/mistralai/client/errors/no_response_error.py similarity index 93% rename from src/mistralai/client/models/no_response_error.py rename to src/mistralai/client/errors/no_response_error.py index 7705f194..d71dfa7b 100644 --- a/src/mistralai/client/models/no_response_error.py +++ b/src/mistralai/client/errors/no_response_error.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 2849e0a482e2 +# @generated-id: 8b469ecb0906 from dataclasses import dataclass diff --git a/src/mistralai/client/models/responsevalidationerror.py b/src/mistralai/client/errors/responsevalidationerror.py similarity index 90% rename from src/mistralai/client/models/responsevalidationerror.py rename to src/mistralai/client/errors/responsevalidationerror.py index 1ed0d552..a7b3b9f0 100644 --- a/src/mistralai/client/models/responsevalidationerror.py +++ b/src/mistralai/client/errors/responsevalidationerror.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: c244a88981e0 +# @generated-id: 6cfaa3147abe import httpx from typing import Optional from dataclasses import dataclass -from mistralai.client.models import MistralError +from mistralai.client.errors import MistralError @dataclass(unsafe_hash=True) diff --git a/src/mistralai/client/models/sdkerror.py b/src/mistralai/client/errors/sdkerror.py similarity index 94% rename from src/mistralai/client/models/sdkerror.py rename to src/mistralai/client/errors/sdkerror.py index 101e1e6a..25b87255 100644 --- a/src/mistralai/client/models/sdkerror.py +++ b/src/mistralai/client/errors/sdkerror.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 12f991dad510 +# @generated-id: c489ffe1e9ca import httpx from typing import Optional from dataclasses import dataclass -from mistralai.client.models import MistralError +from mistralai.client.errors import MistralError MAX_MESSAGE_LEN = 10_000 diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py index 57d389f1..a5f3adf6 100644 --- a/src/mistralai/client/files.py +++ b/src/mistralai/client/files.py @@ -3,14 +3,8 @@ from .basesdk import BaseSDK import httpx -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - file as models_file, - filepurpose as models_filepurpose, - sampletype as models_sampletype, - source as models_source, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -23,13 +17,13 @@ class Files(BaseSDK): def upload( self, *, - file: Union[models_file.File, models_file.FileTypedDict], - purpose: Optional[models_filepurpose.FilePurpose] = None, + file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: + ) -> models.CreateFileResponse: r"""Upload File Upload a file that can be used across various endpoints. @@ -100,7 +94,7 @@ def upload( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UploadFile", + operation_id="files_api_routes_upload_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -112,26 +106,26 @@ def upload( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UploadFileOut, http_res) + return unmarshal_json_response(models.CreateFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def upload_async( self, *, - file: Union[models_file.File, models_file.FileTypedDict], - purpose: Optional[models_filepurpose.FilePurpose] = None, + file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: + ) -> models.CreateFileResponse: r"""Upload File Upload a file that can be used across various endpoints. 
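On the files side, `upload()` keeps its parameters but now returns `models.CreateFileResponse` instead of `UploadFileOut`. A minimal sketch; the `File` field names are assumptions, since only the `models.File`/`FileTypedDict` union appears in this hunk:

```python
import os

from mistralai import Mistral

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

with open("training.jsonl", "rb") as fh:
    # Assumed File field names ("file_name", "content"); the return value is
    # the renamed models.CreateFileResponse.
    uploaded = client.files.upload(
        file={"file_name": "training.jsonl", "content": fh.read()},
    )
print(uploaded)
```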
@@ -202,7 +196,7 @@ async def upload_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UploadFile", + operation_id="files_api_routes_upload_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -214,15 +208,15 @@ async def upload_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UploadFileOut, http_res) + return unmarshal_json_response(models.CreateFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list( self, @@ -230,16 +224,16 @@ def list( page: Optional[int] = 0, page_size: Optional[int] = 100, include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, - source: OptionalNullable[List[models_source.Source]] = UNSET, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: + ) -> models.ListFilesResponse: r"""List Files Returns a list of files that belong to the user's organization. 
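`files.list()` now returns `models.ListFilesResponse` (was `ListFilesOut`) and accepts a `mimetypes` filter as a list of MIME-type strings. A sketch of filtering by MIME type; the exact MIME strings the server accepts are an assumption:

```python
import os

from mistralai import Mistral
from mistralai.client import models

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# Parameters are taken from the list() signature in this diff; the MIME-type
# value below is a placeholder.
files: models.ListFilesResponse = client.files.list(
    page=0,
    page_size=20,
    mimetypes=["application/json"],
)
print(files)
```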
@@ -267,7 +261,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListFilesRequest( + request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, include_total=include_total, @@ -307,7 +301,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListFiles", + operation_id="files_api_routes_list_files", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -319,15 +313,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListFilesOut, http_res) + return unmarshal_json_response(models.ListFilesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -335,16 +329,16 @@ async def list_async( page: Optional[int] = 0, page_size: Optional[int] = 100, include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, - source: OptionalNullable[List[models_source.Source]] = UNSET, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: + ) -> models.ListFilesResponse: r"""List Files Returns a list of files that belong to the user's organization. 
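Every method in this patch has an `_async` twin with the same signature and the same renamed types; the only caller-visible difference is awaiting the coroutine. A sketch using `list_async()`:

```python
import asyncio
import os

from mistralai import Mistral


async def main() -> None:
    client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
    # Same parameters and renamed ListFilesResponse return type as the
    # synchronous list(); only the await differs.
    files = await client.files.list_async(page=0, page_size=20)
    print(files)


asyncio.run(main())
```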
@@ -372,7 +366,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListFilesRequest( + request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, include_total=include_total, @@ -412,7 +406,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListFiles", + operation_id="files_api_routes_list_files", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -424,15 +418,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListFilesOut, http_res) + return unmarshal_json_response(models.ListFilesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def retrieve( self, @@ -442,7 +436,7 @@ def retrieve( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: + ) -> models.GetFileResponse: r"""Retrieve File Returns information about a specific file. @@ -463,7 +457,7 @@ def retrieve( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveFileRequest( + request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, ) @@ -496,7 +490,7 @@ def retrieve( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RetrieveFile", + operation_id="files_api_routes_retrieve_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -508,15 +502,15 @@ def retrieve( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.RetrieveFileOut, http_res) + return unmarshal_json_response(models.GetFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def retrieve_async( self, @@ -526,7 +520,7 @@ async def retrieve_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: + ) -> models.GetFileResponse: r"""Retrieve File Returns information about a specific file. 
@@ -547,7 +541,7 @@ async def retrieve_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveFileRequest( + request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, ) @@ -580,7 +574,7 @@ async def retrieve_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RetrieveFile", + operation_id="files_api_routes_retrieve_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -592,15 +586,15 @@ async def retrieve_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.RetrieveFileOut, http_res) + return unmarshal_json_response(models.GetFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -610,7 +604,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: + ) -> models.DeleteFileResponse: r"""Delete File Delete a file. @@ -631,7 +625,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteFileRequest( + request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, ) @@ -664,7 +658,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteFile", + operation_id="files_api_routes_delete_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -676,15 +670,15 @@ def delete( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteFileOut, http_res) + return unmarshal_json_response(models.DeleteFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -694,7 +688,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: + ) -> models.DeleteFileResponse: r"""Delete File Delete a file. 
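`delete()` is renamed analogously (`DeleteFileOut` becomes `DeleteFileResponse`, operation id `files_api_routes_delete_file`). A short sketch under the same assumptions as above:

```python
# Hypothetical sketch; accessor name and file_id are assumptions/placeholders.
import os

from mistralai import Mistral
from mistralai.client import errors, models

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

try:
    deleted = client.files.delete(file_id="<your-file-id>")
    assert isinstance(deleted, models.DeleteFileResponse)  # previously models.DeleteFileOut
except errors.SDKError as exc:
    print(f"delete failed: {exc}")
```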
@@ -715,7 +709,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteFileRequest( + request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, ) @@ -748,7 +742,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteFile", + operation_id="files_api_routes_delete_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -760,15 +754,15 @@ async def delete_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteFileOut, http_res) + return unmarshal_json_response(models.DeleteFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def download( self, @@ -799,7 +793,7 @@ def download( else: base_url = self._get_url(base_url, url_variables) - request = models.DownloadFileRequest( + request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, ) @@ -832,7 +826,7 @@ def download( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DownloadFile", + operation_id="files_api_routes_download_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -848,13 +842,13 @@ def download( return http_res if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def download_async( self, @@ -885,7 +879,7 @@ async def download_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DownloadFileRequest( + request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, ) @@ -918,7 +912,7 @@ async def download_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DownloadFile", + operation_id="files_api_routes_download_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -934,13 +928,13 @@ async def download_async( return http_res if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, 
"5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def get_signed_url( self, @@ -951,7 +945,7 @@ def get_signed_url( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: + ) -> models.GetSignedURLResponse: r"""Get Signed Url :param file_id: @@ -971,7 +965,7 @@ def get_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.GetFileSignedURLRequest( + request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, expiry=expiry, ) @@ -1005,7 +999,7 @@ def get_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetFileSignedUrl", + operation_id="files_api_routes_get_signed_url", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1017,15 +1011,15 @@ def get_signed_url( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FileSignedURL, http_res) + return unmarshal_json_response(models.GetSignedURLResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_signed_url_async( self, @@ -1036,7 +1030,7 @@ async def get_signed_url_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: + ) -> models.GetSignedURLResponse: r"""Get Signed Url :param file_id: @@ -1056,7 +1050,7 @@ async def get_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetFileSignedURLRequest( + request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, expiry=expiry, ) @@ -1090,7 +1084,7 @@ async def get_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetFileSignedUrl", + operation_id="files_api_routes_get_signed_url", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1102,12 +1096,12 @@ async def get_signed_url_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FileSignedURL, http_res) + return unmarshal_json_response(models.GetSignedURLResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = 
await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/fim.py b/src/mistralai/client/fim.py index be3f7742..8ffb7730 100644 --- a/src/mistralai/client/fim.py +++ b/src/mistralai/client/fim.py @@ -2,12 +2,8 @@ # @generated-id: 217bea5d701d from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - fimcompletionrequest as models_fimcompletionrequest, - fimcompletionstreamrequest as models_fimcompletionstreamrequest, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -28,8 +24,8 @@ def complete( stream: Optional[bool] = False, stop: Optional[ Union[ - models_fimcompletionrequest.FIMCompletionRequestStop, - models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -133,17 +129,17 @@ def complete( return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -156,8 +152,8 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[ Union[ - models_fimcompletionrequest.FIMCompletionRequestStop, - models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -261,17 +257,17 @@ async def complete_async( return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, 
"5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, @@ -284,8 +280,8 @@ def stream( stream: Optional[bool] = True, stop: Optional[ Union[ - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -396,18 +392,18 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -420,8 +416,8 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[ Union[ - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -532,15 +528,15 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/fine_tuning_jobs.py 
b/src/mistralai/client/fine_tuning_jobs.py index 9a28ded1..c2ee871b 100644 --- a/src/mistralai/client/fine_tuning_jobs.py +++ b/src/mistralai/client/fine_tuning_jobs.py @@ -3,15 +3,8 @@ from .basesdk import BaseSDK from datetime import datetime -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - classifiertargetin as models_classifiertargetin, - finetuneablemodeltype as models_finetuneablemodeltype, - jobin as models_jobin, - listfinetuningjobsop as models_listfinetuningjobsop, - trainingfile as models_trainingfile, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -29,7 +22,7 @@ def list( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_listfinetuningjobsop.ListFineTuningJobsStatus + models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -38,7 +31,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: + ) -> models.ListFineTuningJobsResponse: r"""Get Fine Tuning Jobs Get a list of fine-tuning jobs for your organization and user. @@ -68,7 +61,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListFineTuningJobsRequest( + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -110,7 +103,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListFineTuningJobs", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -122,15 +115,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.JobsOut, http_res) + return unmarshal_json_response(models.ListFineTuningJobsResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -142,7 +135,7 @@ async def list_async( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_listfinetuningjobsop.ListFineTuningJobsStatus + models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -151,7 +144,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: + ) -> models.ListFineTuningJobsResponse: r"""Get Fine Tuning 
Jobs Get a list of fine-tuning jobs for your organization and user. @@ -181,7 +174,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListFineTuningJobsRequest( + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -223,7 +216,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListFineTuningJobs", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -235,59 +228,49 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.JobsOut, http_res) + return unmarshal_json_response(models.ListFineTuningJobsResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def create( self, *, model: str, - hyperparameters: Union[ - models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict - ], + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], training_files: Optional[ - Union[ - List[models_trainingfile.TrainingFile], - List[models_trainingfile.TrainingFileTypedDict], - ] + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, integrations: OptionalNullable[ Union[ - List[models_jobin.JobInIntegration], - List[models_jobin.JobInIntegrationTypedDict], + List[models.CreateFineTuningJobRequestIntegration], + List[models.CreateFineTuningJobRequestIntegrationTypedDict], ] ] = UNSET, auto_start: Optional[bool] = None, invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[ - models_finetuneablemodeltype.FineTuneableModelType - ] = UNSET, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, repositories: OptionalNullable[ Union[ - List[models_jobin.JobInRepository], - List[models_jobin.JobInRepositoryTypedDict], + List[models.CreateFineTuningJobRequestRepository], + List[models.CreateFineTuningJobRequestRepositoryTypedDict], ] ] = UNSET, classifier_targets: OptionalNullable[ - Union[ - List[models_classifiertargetin.ClassifierTargetIn], - List[models_classifiertargetin.ClassifierTargetInTypedDict], - ] + Union[List[models.ClassifierTarget], List[models.ClassifierTargetTypedDict]] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.CreateFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. 
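On the fine-tuning side, the list operation's types are renamed as well: the response moves from `JobsOut` to `ListFineTuningJobsResponse`, the request model becomes `JobsAPIRoutesFineTuningGetFineTuningJobsRequest`, and the `status` filter now uses `models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus`. A hedged sketch; the `client.fine_tuning.jobs` accessor is an assumption, since these hunks only show the module, not how it is mounted on the SDK:

```python
# Hypothetical sketch; the fine-tuning accessor path is an assumption.
import os

from mistralai import Mistral
from mistralai.client import errors, models

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

try:
    jobs = client.fine_tuning.jobs.list(page=0, page_size=10, created_by_me=True)
    assert isinstance(jobs, models.ListFineTuningJobsResponse)  # previously models.JobsOut
except errors.SDKError as exc:
    print(f"listing jobs failed: {exc}")
```

Code that annotated the `status` argument with the old `ListFineTuningJobsStatus` name needs the longer `JobsAPIRoutesFineTuningGetFineTuningJobsStatus` type.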
@@ -318,7 +301,7 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.JobIn( + request = models.CreateFineTuningJobRequest( model=model, training_files=utils.get_pydantic_model( training_files, Optional[List[models.TrainingFile]] @@ -326,7 +309,8 @@ def create( validation_files=validation_files, suffix=suffix, integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegration]] + integrations, + OptionalNullable[List[models.CreateFineTuningJobRequestIntegration]], ), auto_start=auto_start, invalid_sample_skip_percentage=invalid_sample_skip_percentage, @@ -335,10 +319,11 @@ def create( hyperparameters, models.Hyperparameters ), repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepository]] + repositories, + OptionalNullable[List[models.CreateFineTuningJobRequestRepository]], ), classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] + classifier_targets, OptionalNullable[List[models.ClassifierTarget]] ), ) @@ -356,7 +341,7 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn + request, False, False, "json", models.CreateFineTuningJobRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -374,7 +359,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -386,59 +371,51 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.CreateFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, *, model: str, - hyperparameters: Union[ - models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict - ], + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], training_files: Optional[ - Union[ - List[models_trainingfile.TrainingFile], - List[models_trainingfile.TrainingFileTypedDict], - ] + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, integrations: OptionalNullable[ Union[ - List[models_jobin.JobInIntegration], - List[models_jobin.JobInIntegrationTypedDict], + List[models.CreateFineTuningJobRequestIntegration], + List[models.CreateFineTuningJobRequestIntegrationTypedDict], ] ] = UNSET, auto_start: Optional[bool] = None, invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[ - 
models_finetuneablemodeltype.FineTuneableModelType - ] = UNSET, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, repositories: OptionalNullable[ Union[ - List[models_jobin.JobInRepository], - List[models_jobin.JobInRepositoryTypedDict], + List[models.CreateFineTuningJobRequestRepository], + List[models.CreateFineTuningJobRequestRepositoryTypedDict], ] ] = UNSET, classifier_targets: OptionalNullable[ - Union[ - List[models_classifiertargetin.ClassifierTargetIn], - List[models_classifiertargetin.ClassifierTargetInTypedDict], - ] + Union[List[models.ClassifierTarget], List[models.ClassifierTargetTypedDict]] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.CreateFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. @@ -469,7 +446,7 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobIn( + request = models.CreateFineTuningJobRequest( model=model, training_files=utils.get_pydantic_model( training_files, Optional[List[models.TrainingFile]] @@ -477,7 +454,8 @@ async def create_async( validation_files=validation_files, suffix=suffix, integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegration]] + integrations, + OptionalNullable[List[models.CreateFineTuningJobRequestIntegration]], ), auto_start=auto_start, invalid_sample_skip_percentage=invalid_sample_skip_percentage, @@ -486,10 +464,11 @@ async def create_async( hyperparameters, models.Hyperparameters ), repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepository]] + repositories, + OptionalNullable[List[models.CreateFineTuningJobRequestRepository]], ), classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] + classifier_targets, OptionalNullable[List[models.ClassifierTarget]] ), ) @@ -507,7 +486,7 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn + request, False, False, "json", models.CreateFineTuningJobRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -525,7 +504,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -537,15 +516,17 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.CreateFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise 
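For `create()`, the request body model is renamed from `JobIn` to `CreateFineTuningJobRequest`, the nested integration and repository types pick up the `CreateFineTuningJobRequest*` prefix, `ClassifierTargetIn` becomes `ClassifierTarget`, and the response is now `JobsAPIRoutesFineTuningCreateFineTuningJobResponse`. A sketch of a call site after the rename; the accessor path, the model name, and the hyperparameter keys are assumptions (the hunks show `models.Hyperparameters` but not its fields), so treat them as placeholders:

```python
# Hypothetical sketch; accessor path, model name and hyperparameter keys are assumptions.
import os

from mistralai import Mistral
from mistralai.client import errors, models

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

try:
    job = client.fine_tuning.jobs.create(
        model="open-mistral-7b",                 # placeholder model name
        hyperparameters={"training_steps": 10},  # placeholder keys; see models.Hyperparameters
        auto_start=False,
    )
    assert isinstance(job, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse)
except errors.SDKError as exc:
    print(f"create failed: {exc}")
```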
errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -555,7 +536,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.GetFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. @@ -576,7 +557,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, ) @@ -609,7 +590,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -621,15 +602,17 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.GetFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -639,7 +622,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.GetFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
@@ -660,7 +643,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, ) @@ -693,7 +676,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -705,15 +688,17 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.GetFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def cancel( self, @@ -723,7 +708,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.CancelFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -744,7 +729,7 @@ def cancel( else: base_url = self._get_url(base_url, url_variables) - request = models.CancelFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, ) @@ -777,7 +762,7 @@ def cancel( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CancelFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -789,15 +774,17 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.CancelFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def cancel_async( self, @@ -807,7 +794,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.CancelFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -828,7 +815,7 @@ async def cancel_async( else: base_url = self._get_url(base_url, url_variables) - request = models.CancelFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, ) @@ -861,7 +848,7 @@ async def cancel_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CancelFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -873,15 +860,17 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.CancelFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def start( self, @@ -891,7 +880,7 @@ def start( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.StartFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
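`cancel()` and `start()` get the same treatment, returning `JobsAPIRoutesFineTuningCancelFineTuningJobResponse` and `JobsAPIRoutesFineTuningStartFineTuningJobResponse` respectively. A combined sketch, same assumptions as above and with `job_id` as a placeholder:

```python
# Hypothetical sketch; accessor path and job_id are assumptions/placeholders.
import os

from mistralai import Mistral
from mistralai.client import errors, models

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])
job_id = "<your-job-id>"

try:
    started = client.fine_tuning.jobs.start(job_id=job_id)
    assert isinstance(started, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse)

    cancelled = client.fine_tuning.jobs.cancel(job_id=job_id)
    assert isinstance(cancelled, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse)
except errors.SDKError as exc:
    print(f"fine-tuning job operation failed: {exc}")
```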
@@ -912,7 +901,7 @@ def start( else: base_url = self._get_url(base_url, url_variables) - request = models.StartFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, ) @@ -945,7 +934,7 @@ def start( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -957,15 +946,17 @@ def start( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.StartFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def start_async( self, @@ -975,7 +966,7 @@ async def start_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.StartFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
@@ -996,7 +987,7 @@ async def start_async( else: base_url = self._get_url(base_url, url_variables) - request = models.StartFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, ) @@ -1029,7 +1020,7 @@ async def start_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1041,12 +1032,14 @@ async def start_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.StartFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/libraries.py b/src/mistralai/client/libraries.py index 26ceabe1..b8728362 100644 --- a/src/mistralai/client/libraries.py +++ b/src/mistralai/client/libraries.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext from mistralai.client.accesses import Accesses from mistralai.client.documents import Documents @@ -39,7 +39,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: + ) -> models.ListLibrariesResponse: r"""List all libraries you have access to. List all libraries that you have created or have been shared with you. 
@@ -87,7 +87,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListLibraries", + operation_id="libraries_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -99,15 +99,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListLibraryOut, http_res) + return unmarshal_json_response(models.ListLibrariesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -116,7 +116,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: + ) -> models.ListLibrariesResponse: r"""List all libraries you have access to. List all libraries that you have created or have been shared with you. @@ -164,7 +164,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListLibraries", + operation_id="libraries_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -176,15 +176,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListLibraryOut, http_res) + return unmarshal_json_response(models.ListLibrariesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def create( self, @@ -196,7 +196,7 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Create a new Library. Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. 
@@ -219,7 +219,7 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.LibraryIn( + request = models.CreateLibraryRequest( name=name, description=description, chunk_size=chunk_size, @@ -239,7 +239,7 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn + request, False, False, "json", models.CreateLibraryRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -257,7 +257,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateLibrary", + operation_id="libraries_create_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -270,20 +270,20 @@ def create( response_data: Any = None if utils.match_response(http_res, "201", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -295,7 +295,7 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Create a new Library. Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. 
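`create()` now serializes `models.CreateLibraryRequest` (formerly `LibraryIn`), returns `models.Library` (formerly `LibraryOut`) on 201, and raises validation failures as `errors.HTTPValidationError` instead of `models.HTTPValidationError`. A sketch under the same assumptions as the list example:

```python
# Hypothetical sketch; the libraries accessor path is an assumption.
import os

from mistralai import Mistral
from mistralai.client import errors, models

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

try:
    library = client.beta.libraries.create(
        name="migration-notes",
        description="Scratch library used to verify the renamed models.",
    )
    assert isinstance(library, models.Library)  # previously models.LibraryOut
except errors.HTTPValidationError as exc:       # previously models.HTTPValidationError
    print(f"invalid request: {exc}")
except errors.SDKError as exc:
    print(f"create failed: {exc}")
```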
@@ -318,7 +318,7 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibraryIn( + request = models.CreateLibraryRequest( name=name, description=description, chunk_size=chunk_size, @@ -338,7 +338,7 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn + request, False, False, "json", models.CreateLibraryRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -356,7 +356,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateLibrary", + operation_id="libraries_create_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -369,20 +369,20 @@ async def create_async( response_data: Any = None if utils.match_response(http_res, "201", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -392,7 +392,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Detailed information about a specific Library. Given a library id, details information about that Library. 
@@ -413,7 +413,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetLibraryRequest( + request = models.LibrariesGetV1Request( library_id=library_id, ) @@ -446,7 +446,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetLibrary", + operation_id="libraries_get_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -459,20 +459,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -482,7 +482,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Detailed information about a specific Library. Given a library id, details information about that Library. 
@@ -503,7 +503,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetLibraryRequest( + request = models.LibrariesGetV1Request( library_id=library_id, ) @@ -536,7 +536,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetLibrary", + operation_id="libraries_get_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -549,20 +549,20 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -572,7 +572,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Delete a library and all of it's document. Given a library id, deletes it together with all documents that have been uploaded to that library. 
@@ -593,7 +593,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteLibraryRequest( + request = models.LibrariesDeleteV1Request( library_id=library_id, ) @@ -626,7 +626,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteLibrary", + operation_id="libraries_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -639,20 +639,20 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -662,7 +662,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Delete a library and all of it's document. Given a library id, deletes it together with all documents that have been uploaded to that library. 
@@ -683,7 +683,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteLibraryRequest( + request = models.LibrariesDeleteV1Request( library_id=library_id, ) @@ -716,7 +716,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteLibrary", + operation_id="libraries_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -729,20 +729,20 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -754,7 +754,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Update a library. Given a library id, you can update the name and description. 
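`delete_async` keeps its signature; only the unmarshalled response type (`models.Library` instead of `models.LibraryOut`) and the relocated exceptions change. A short async sketch under the same assumed entry points as the earlier example:

```python
# Hedged sketch -- entry points are assumptions, not shown in this patch.
import asyncio

from mistralai import Mistral
from mistralai.client import errors, models


async def remove_library(library_id: str) -> models.Library:
    client = Mistral(api_key="...")
    # The 200 response is now unmarshalled as models.Library, and failures
    # raise errors.* instead of models.*.
    try:
        return await client.beta.libraries.delete_async(library_id=library_id)
    except errors.SDKError as err:
        raise RuntimeError(f"delete failed: {err}") from err


if __name__ == "__main__":
    asyncio.run(remove_library("lib_123"))
```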
@@ -777,9 +777,9 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateLibraryRequest( + request = models.LibrariesUpdateV1Request( library_id=library_id, - library_in_update=models.LibraryInUpdate( + update_library_request=models.UpdateLibraryRequest( name=name, description=description, ), @@ -799,7 +799,11 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate + request.update_library_request, + False, + False, + "json", + models.UpdateLibraryRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -817,7 +821,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateLibrary", + operation_id="libraries_update_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -830,20 +834,20 @@ def update( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -855,7 +859,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Update a library. Given a library id, you can update the name and description. 
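For `update`, the path-level request model becomes `LibrariesUpdateV1Request` and the JSON body model is renamed from `LibraryInUpdate` to `UpdateLibraryRequest` (field `library_in_update` → `update_library_request`). Callers that pass `name`/`description` keyword arguments are unaffected; only code that references the body model by name needs the rename. A brief sketch (the `client.beta.libraries` accessor is an assumption):

```python
from mistralai.client import models  # assumed module path

# Keyword-argument style is unchanged by this patch:
# client.beta.libraries.update(library_id="lib_123", name="Docs", description="Product docs")

# Code that constructed the body model directly must switch names:
body = models.UpdateLibraryRequest(   # was models.LibraryInUpdate
    name="Docs",
    description="Product docs",
)
```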
@@ -878,9 +882,9 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateLibraryRequest( + request = models.LibrariesUpdateV1Request( library_id=library_id, - library_in_update=models.LibraryInUpdate( + update_library_request=models.UpdateLibraryRequest( name=name, description=description, ), @@ -900,7 +904,11 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate + request.update_library_request, + False, + False, + "json", + models.UpdateLibraryRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -918,7 +926,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateLibrary", + operation_id="libraries_update_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -931,17 +939,17 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py index 093ffcbd..5ef8b3f3 100644 --- a/src/mistralai/client/models/__init__.py +++ b/src/mistralai/client/models/__init__.py @@ -1,42 +1,116 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" # @generated-id: e0e8dad92725 -from .mistralerror import MistralError -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING + +from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir if TYPE_CHECKING: - from .agent import Agent, AgentObject, AgentTool, AgentToolTypedDict, AgentTypedDict + from .agent import ( + Agent, + AgentTool, + AgentToolTypedDict, + AgentTypedDict, + UnknownAgentTool, + ) from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict from .agentconversation import ( AgentConversation, AgentConversationAgentVersion, AgentConversationAgentVersionTypedDict, - AgentConversationObject, AgentConversationTypedDict, ) - from .agentcreationrequest import ( - AgentCreationRequest, - AgentCreationRequestTool, - AgentCreationRequestToolTypedDict, - AgentCreationRequestTypedDict, - ) from .agenthandoffdoneevent import ( AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict, ) - from .agenthandoffentry import ( - AgentHandoffEntry, - AgentHandoffEntryObject, - AgentHandoffEntryType, - AgentHandoffEntryTypedDict, - ) + from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict from .agenthandoffstartedevent import ( AgentHandoffStartedEvent, AgentHandoffStartedEventTypedDict, ) + from .agents_api_v1_agents_create_or_update_aliasop import ( + AgentsAPIV1AgentsCreateOrUpdateAliasRequest, + AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, + ) + from .agents_api_v1_agents_delete_aliasop import ( + AgentsAPIV1AgentsDeleteAliasRequest, + AgentsAPIV1AgentsDeleteAliasRequestTypedDict, + ) + from .agents_api_v1_agents_deleteop import ( + AgentsAPIV1AgentsDeleteRequest, + AgentsAPIV1AgentsDeleteRequestTypedDict, + ) + from .agents_api_v1_agents_get_versionop import ( + AgentsAPIV1AgentsGetVersionRequest, + AgentsAPIV1AgentsGetVersionRequestTypedDict, + ) + from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetAgentVersion, + AgentsAPIV1AgentsGetAgentVersionTypedDict, + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, + ) + from .agents_api_v1_agents_list_version_aliasesop import ( + AgentsAPIV1AgentsListVersionAliasesRequest, + AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, + ) + from .agents_api_v1_agents_list_versionsop import ( + AgentsAPIV1AgentsListVersionsRequest, + AgentsAPIV1AgentsListVersionsRequestTypedDict, + ) + from .agents_api_v1_agents_listop import ( + AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, + ) + from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, + ) + from .agents_api_v1_agents_updateop import ( + AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, + ) + from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, + ) + from .agents_api_v1_conversations_deleteop import ( + AgentsAPIV1ConversationsDeleteRequest, + AgentsAPIV1ConversationsDeleteRequestTypedDict, + ) + from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + AgentsAPIV1ConversationsGetRequestTypedDict, + ResponseV1ConversationsGet, + ResponseV1ConversationsGetTypedDict, + ) + from 
.agents_api_v1_conversations_historyop import ( + AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, + ) + from .agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + AgentsAPIV1ConversationsListResponse, + AgentsAPIV1ConversationsListResponseTypedDict, + ) + from .agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, + ) + from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, + ) from .agentscompletionrequest import ( AgentsCompletionRequest, AgentsCompletionRequestMessage, @@ -57,28 +131,15 @@ AgentsCompletionStreamRequestToolChoiceTypedDict, AgentsCompletionStreamRequestTypedDict, ) - from .agentupdaterequest import ( - AgentUpdateRequest, - AgentUpdateRequestTool, - AgentUpdateRequestToolTypedDict, - AgentUpdateRequestTypedDict, - ) from .apiendpoint import APIEndpoint - from .appendconversationop import ( - AppendConversationRequest, - AppendConversationRequestTypedDict, - ) - from .appendconversationstreamop import ( - AppendConversationStreamRequest, - AppendConversationStreamRequestTypedDict, + from .archivemodelresponse import ( + ArchiveModelResponse, + ArchiveModelResponseTypedDict, ) - from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict - from .archivemodelop import ArchiveModelRequest, ArchiveModelRequestTypedDict from .assistantmessage import ( AssistantMessage, AssistantMessageContent, AssistantMessageContentTypedDict, - AssistantMessageRole, AssistantMessageTypedDict, ) from .audiochunk import AudioChunk, AudioChunkTypedDict @@ -94,19 +155,10 @@ ) from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict - from .batchjobin import BatchJobIn, BatchJobInTypedDict - from .batchjobout import BatchJobOut, BatchJobOutTypedDict - from .batchjobsout import BatchJobsOut, BatchJobsOutTypedDict + from .batchjob import BatchJob, BatchJobTypedDict from .batchjobstatus import BatchJobStatus from .batchrequest import BatchRequest, BatchRequestTypedDict from .builtinconnectors import BuiltInConnectors - from .cancelbatchjobop import CancelBatchJobRequest, CancelBatchJobRequestTypedDict - from .cancelfinetuningjobop import ( - CancelFineTuningJobRequest, - CancelFineTuningJobRequestTypedDict, - CancelFineTuningJobResponse, - CancelFineTuningJobResponseTypedDict, - ) from .chatclassificationrequest import ( ChatClassificationRequest, ChatClassificationRequestTypedDict, @@ -150,7 +202,7 @@ ChatModerationRequestInputs3TypedDict, ChatModerationRequestTypedDict, ) - from .checkpointout import CheckpointOut, CheckpointOutTypedDict + from .checkpoint import Checkpoint, CheckpointTypedDict from .classificationrequest import ( ClassificationRequest, ClassificationRequestInputs, @@ -165,60 +217,65 @@ ClassificationTargetResult, ClassificationTargetResultTypedDict, ) - from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutIntegration, - ClassifierDetailedJobOutIntegrationTypedDict, - ClassifierDetailedJobOutStatus, - ClassifierDetailedJobOutTypedDict, - ) - from .classifierftmodelout import ( - 
ClassifierFTModelOut, - ClassifierFTModelOutTypedDict, - ) - from .classifierjobout import ( - ClassifierJobOut, - ClassifierJobOutIntegration, - ClassifierJobOutIntegrationTypedDict, - ClassifierJobOutStatus, - ClassifierJobOutTypedDict, - ) - from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict - from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict + from .classifierfinetunedmodel import ( + ClassifierFineTunedModel, + ClassifierFineTunedModelTypedDict, + ) + from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobIntegration, + ClassifierFineTuningJobIntegrationTypedDict, + ClassifierFineTuningJobStatus, + ClassifierFineTuningJobTypedDict, + UnknownClassifierFineTuningJobIntegration, + ) + from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsIntegration, + ClassifierFineTuningJobDetailsIntegrationTypedDict, + ClassifierFineTuningJobDetailsStatus, + ClassifierFineTuningJobDetailsTypedDict, + UnknownClassifierFineTuningJobDetailsIntegration, + ) + from .classifiertarget import ClassifierTarget, ClassifierTargetTypedDict + from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, + ) from .classifiertrainingparameters import ( ClassifierTrainingParameters, ClassifierTrainingParametersTypedDict, ) - from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, - ) from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict from .completionargs import CompletionArgs, CompletionArgsTypedDict from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict from .completionchunk import CompletionChunk, CompletionChunkTypedDict - from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutIntegration, - CompletionDetailedJobOutIntegrationTypedDict, - CompletionDetailedJobOutRepository, - CompletionDetailedJobOutRepositoryTypedDict, - CompletionDetailedJobOutStatus, - CompletionDetailedJobOutTypedDict, - ) from .completionevent import CompletionEvent, CompletionEventTypedDict - from .completionftmodelout import ( - CompletionFTModelOut, - CompletionFTModelOutTypedDict, - ) - from .completionjobout import ( - CompletionJobOut, - CompletionJobOutIntegration, - CompletionJobOutIntegrationTypedDict, - CompletionJobOutRepository, - CompletionJobOutRepositoryTypedDict, - CompletionJobOutStatus, - CompletionJobOutTypedDict, + from .completionfinetunedmodel import ( + CompletionFineTunedModel, + CompletionFineTunedModelTypedDict, + ) + from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobIntegration, + CompletionFineTuningJobIntegrationTypedDict, + CompletionFineTuningJobRepository, + CompletionFineTuningJobRepositoryTypedDict, + CompletionFineTuningJobStatus, + CompletionFineTuningJobTypedDict, + UnknownCompletionFineTuningJobIntegration, + UnknownCompletionFineTuningJobRepository, + ) + from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsIntegration, + CompletionFineTuningJobDetailsIntegrationTypedDict, + CompletionFineTuningJobDetailsRepository, + CompletionFineTuningJobDetailsRepositoryTypedDict, + CompletionFineTuningJobDetailsStatus, + CompletionFineTuningJobDetailsTypedDict, + UnknownCompletionFineTuningJobDetailsIntegration, + UnknownCompletionFineTuningJobDetailsRepository, ) from 
.completionresponsestreamchoice import ( CompletionResponseStreamChoice, @@ -229,11 +286,7 @@ CompletionTrainingParameters, CompletionTrainingParametersTypedDict, ) - from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, - ) - from .contentchunk import ContentChunk, ContentChunkTypedDict + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk from .conversationappendrequest import ( ConversationAppendRequest, ConversationAppendRequestHandoffExecution, @@ -249,10 +302,10 @@ ConversationEventsData, ConversationEventsDataTypedDict, ConversationEventsTypedDict, + UnknownConversationEventsData, ) from .conversationhistory import ( ConversationHistory, - ConversationHistoryObject, ConversationHistoryTypedDict, Entry, EntryTypedDict, @@ -260,7 +313,6 @@ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from .conversationmessages import ( ConversationMessages, - ConversationMessagesObject, ConversationMessagesTypedDict, ) from .conversationrequest import ( @@ -274,7 +326,6 @@ ) from .conversationresponse import ( ConversationResponse, - ConversationResponseObject, ConversationResponseTypedDict, Output, OutputTypedDict, @@ -302,38 +353,46 @@ ConversationStreamRequestToolTypedDict, ConversationStreamRequestTypedDict, ) + from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkThinking, + ConversationThinkChunkThinkingTypedDict, + ConversationThinkChunkTypedDict, + ) from .conversationusageinfo import ( ConversationUsageInfo, ConversationUsageInfoTypedDict, ) - from .createfinetuningjobop import ( - CreateFineTuningJobResponse, - CreateFineTuningJobResponseTypedDict, - Response, - ResponseTypedDict, - ) - from .createorupdateagentaliasop import ( - CreateOrUpdateAgentAliasRequest, - CreateOrUpdateAgentAliasRequestTypedDict, - ) - from .deleteagentaliasop import ( - DeleteAgentAliasRequest, - DeleteAgentAliasRequestTypedDict, + from .createagentrequest import ( + CreateAgentRequest, + CreateAgentRequestTool, + CreateAgentRequestToolTypedDict, + CreateAgentRequestTypedDict, + ) + from .createbatchjobrequest import ( + CreateBatchJobRequest, + CreateBatchJobRequestTypedDict, + ) + from .createfileresponse import CreateFileResponse, CreateFileResponseTypedDict + from .createfinetuningjobrequest import ( + CreateFineTuningJobRequest, + CreateFineTuningJobRequestIntegration, + CreateFineTuningJobRequestIntegrationTypedDict, + CreateFineTuningJobRequestRepository, + CreateFineTuningJobRequestRepositoryTypedDict, + CreateFineTuningJobRequestTypedDict, + Hyperparameters, + HyperparametersTypedDict, ) - from .deleteagentop import DeleteAgentRequest, DeleteAgentRequestTypedDict - from .deleteconversationop import ( - DeleteConversationRequest, - DeleteConversationRequestTypedDict, + from .createlibraryrequest import ( + CreateLibraryRequest, + CreateLibraryRequestTypedDict, ) - from .deletedocumentop import DeleteDocumentRequest, DeleteDocumentRequestTypedDict - from .deletefileop import DeleteFileRequest, DeleteFileRequestTypedDict - from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict - from .deletelibraryaccessop import ( - DeleteLibraryAccessRequest, - DeleteLibraryAccessRequestTypedDict, + from .delete_model_v1_models_model_id_deleteop import ( + DeleteModelV1ModelsModelIDDeleteRequest, + DeleteModelV1ModelsModelIDDeleteRequestTypedDict, ) - from .deletelibraryop import DeleteLibraryRequest, DeleteLibraryRequestTypedDict - from 
.deletemodelop import DeleteModelRequest, DeleteModelRequestTypedDict + from .deletefileresponse import DeleteFileResponse, DeleteFileResponseTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import ( DeltaMessage, @@ -341,21 +400,10 @@ DeltaMessageContentTypedDict, DeltaMessageTypedDict, ) + from .document import Document, DocumentTypedDict from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict - from .documentout import DocumentOut, DocumentOutTypedDict from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict - from .documentupdatein import ( - Attributes, - AttributesTypedDict, - DocumentUpdateIn, - DocumentUpdateInTypedDict, - ) - from .documenturlchunk import ( - DocumentURLChunk, - DocumentURLChunkType, - DocumentURLChunkTypedDict, - ) - from .downloadfileop import DownloadFileRequest, DownloadFileRequestTypedDict + from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .embeddingdtype import EmbeddingDtype from .embeddingrequest import ( EmbeddingRequest, @@ -370,12 +418,35 @@ ) from .encodingformat import EncodingFormat from .entitytype import EntityType - from .eventout import EventOut, EventOutTypedDict + from .event import Event, EventTypedDict from .file import File, FileTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose + from .files_api_routes_delete_fileop import ( + FilesAPIRoutesDeleteFileRequest, + FilesAPIRoutesDeleteFileRequestTypedDict, + ) + from .files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + FilesAPIRoutesDownloadFileRequestTypedDict, + ) + from .files_api_routes_get_signed_urlop import ( + FilesAPIRoutesGetSignedURLRequest, + FilesAPIRoutesGetSignedURLRequestTypedDict, + ) + from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, + ) + from .files_api_routes_retrieve_fileop import ( + FilesAPIRoutesRetrieveFileRequest, + FilesAPIRoutesRetrieveFileRequestTypedDict, + ) + from .files_api_routes_upload_fileop import ( + MultiPartBodyParams, + MultiPartBodyParamsTypedDict, + ) from .fileschema import FileSchema, FileSchemaTypedDict - from .filesignedurl import FileSignedURL, FileSignedURLTypedDict from .fimcompletionrequest import ( FIMCompletionRequest, FIMCompletionRequestStop, @@ -393,11 +464,11 @@ FIMCompletionStreamRequestTypedDict, ) from .finetuneablemodeltype import FineTuneableModelType - from .ftclassifierlossfunction import FTClassifierLossFunction - from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, + from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, ) + from .ftclassifierlossfunction import FTClassifierLossFunction from .ftmodelcard import FTModelCard, FTModelCardTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( @@ -408,157 +479,196 @@ ) from .functioncallentry import ( FunctionCallEntry, - FunctionCallEntryObject, - FunctionCallEntryType, + FunctionCallEntryConfirmationStatus, FunctionCallEntryTypedDict, ) from .functioncallentryarguments import ( FunctionCallEntryArguments, FunctionCallEntryArgumentsTypedDict, ) - from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict - from .functionname import FunctionName, FunctionNameTypedDict - from .functionresultentry import ( - FunctionResultEntry, - FunctionResultEntryObject, - 
FunctionResultEntryType, - FunctionResultEntryTypedDict, + from .functioncallevent import ( + FunctionCallEvent, + FunctionCallEventConfirmationStatus, + FunctionCallEventTypedDict, ) + from .functionname import FunctionName, FunctionNameTypedDict + from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict from .functiontool import FunctionTool, FunctionToolTypedDict - from .getagentop import ( - GetAgentAgentVersion, - GetAgentAgentVersionTypedDict, - GetAgentRequest, - GetAgentRequestTypedDict, - ) - from .getagentversionop import ( - GetAgentVersionRequest, - GetAgentVersionRequestTypedDict, - ) - from .getbatchjobop import GetBatchJobRequest, GetBatchJobRequestTypedDict - from .getconversationhistoryop import ( - GetConversationHistoryRequest, - GetConversationHistoryRequestTypedDict, - ) - from .getconversationmessagesop import ( - GetConversationMessagesRequest, - GetConversationMessagesRequestTypedDict, - ) - from .getconversationop import ( - GetConversationRequest, - GetConversationRequestTypedDict, - ResponseV1ConversationsGet, - ResponseV1ConversationsGetTypedDict, - ) - from .getdocumentextractedtextsignedurlop import ( - GetDocumentExtractedTextSignedURLRequest, - GetDocumentExtractedTextSignedURLRequestTypedDict, - ) - from .getdocumentop import GetDocumentRequest, GetDocumentRequestTypedDict - from .getdocumentsignedurlop import ( - GetDocumentSignedURLRequest, - GetDocumentSignedURLRequestTypedDict, - ) - from .getdocumentstatusop import ( - GetDocumentStatusRequest, - GetDocumentStatusRequestTypedDict, - ) - from .getdocumenttextcontentop import ( - GetDocumentTextContentRequest, - GetDocumentTextContentRequestTypedDict, - ) - from .getfilesignedurlop import ( - GetFileSignedURLRequest, - GetFileSignedURLRequestTypedDict, + from .getfileresponse import GetFileResponse, GetFileResponseTypedDict + from .getsignedurlresponse import ( + GetSignedURLResponse, + GetSignedURLResponseTypedDict, ) - from .getfinetuningjobop import ( - GetFineTuningJobRequest, - GetFineTuningJobRequestTypedDict, - GetFineTuningJobResponse, - GetFineTuningJobResponseTypedDict, - ) - from .getlibraryop import GetLibraryRequest, GetLibraryRequestTypedDict + from .githubrepository import GithubRepository, GithubRepositoryTypedDict from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict - from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict - from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagedetail import ImageDetail from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, - ImageURLChunkType, ImageURLChunkTypedDict, ImageURLUnion, ImageURLUnionTypedDict, ) from .inputentries import InputEntries, InputEntriesTypedDict - from .inputs import ( - Inputs, - InputsMessage, - InputsMessageTypedDict, - InputsTypedDict, - InstructRequestInputs, - InstructRequestInputsTypedDict, - ) + from .inputs import Inputs, InputsTypedDict from .instructrequest import ( InstructRequest, InstructRequestMessage, InstructRequestMessageTypedDict, InstructRequestTypedDict, ) - from .jobin import ( - Hyperparameters, - HyperparametersTypedDict, - JobIn, - JobInIntegration, - JobInIntegrationTypedDict, - JobInRepository, - JobInRepositoryTypedDict, - JobInTypedDict, - ) - from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict - from .jobsout import JobsOut, JobsOutData, 
JobsOutDataTypedDict, JobsOutTypedDict - from .jsonschema import JSONSchema, JSONSchemaTypedDict - from .legacyjobmetadataout import ( - LegacyJobMetadataOut, - LegacyJobMetadataOutTypedDict, - ) - from .libraryin import LibraryIn, LibraryInTypedDict - from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict - from .libraryout import LibraryOut, LibraryOutTypedDict - from .listagentaliasesop import ( - ListAgentAliasesRequest, - ListAgentAliasesRequestTypedDict, - ) - from .listagentsop import ListAgentsRequest, ListAgentsRequestTypedDict - from .listagentversionsop import ( - ListAgentVersionsRequest, - ListAgentVersionsRequestTypedDict, - ) - from .listbatchjobsop import ( - ListBatchJobsRequest, - ListBatchJobsRequestTypedDict, + from .jobmetadata import JobMetadata, JobMetadataTypedDict + from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobop import ( + JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, OrderBy, ) - from .listconversationsop import ( - ListConversationsRequest, - ListConversationsRequestTypedDict, - ListConversationsResponse, - ListConversationsResponseTypedDict, - ) - from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict - from .listdocumentsop import ListDocumentsRequest, ListDocumentsRequestTypedDict - from .listfilesop import ListFilesRequest, ListFilesRequestTypedDict - from .listfilesout import ListFilesOut, ListFilesOutTypedDict - from .listfinetuningjobsop import ( - ListFineTuningJobsRequest, - ListFineTuningJobsRequestTypedDict, - ListFineTuningJobsStatus, - ) - from .listlibraryaccessesop import ( - ListLibraryAccessesRequest, - ListLibraryAccessesRequestTypedDict, - ) - from .listlibraryout import ListLibraryOut, ListLibraryOutTypedDict + from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response, + ResponseTypedDict, + UnknownResponse, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobsStatus, + ) + from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + 
JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, + UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict + from .libraries_delete_v1op import ( + LibrariesDeleteV1Request, + LibrariesDeleteV1RequestTypedDict, + ) + from .libraries_documents_delete_v1op import ( + LibrariesDocumentsDeleteV1Request, + LibrariesDocumentsDeleteV1RequestTypedDict, + ) + from .libraries_documents_get_extracted_text_signed_url_v1op import ( + LibrariesDocumentsGetExtractedTextSignedURLV1Request, + LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_signed_url_v1op import ( + LibrariesDocumentsGetSignedURLV1Request, + LibrariesDocumentsGetSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_status_v1op import ( + LibrariesDocumentsGetStatusV1Request, + LibrariesDocumentsGetStatusV1RequestTypedDict, + ) + from .libraries_documents_get_text_content_v1op import ( + LibrariesDocumentsGetTextContentV1Request, + LibrariesDocumentsGetTextContentV1RequestTypedDict, + ) + from .libraries_documents_get_v1op import ( + LibrariesDocumentsGetV1Request, + LibrariesDocumentsGetV1RequestTypedDict, + ) + from .libraries_documents_list_v1op import ( + LibrariesDocumentsListV1Request, + LibrariesDocumentsListV1RequestTypedDict, + ) + from .libraries_documents_reprocess_v1op import ( + LibrariesDocumentsReprocessV1Request, + LibrariesDocumentsReprocessV1RequestTypedDict, + ) + from .libraries_documents_update_v1op import ( + LibrariesDocumentsUpdateV1Request, + LibrariesDocumentsUpdateV1RequestTypedDict, + ) + from .libraries_documents_upload_v1op import ( + DocumentUpload, + DocumentUploadTypedDict, + LibrariesDocumentsUploadV1Request, + LibrariesDocumentsUploadV1RequestTypedDict, + ) + from .libraries_get_v1op import ( + LibrariesGetV1Request, + LibrariesGetV1RequestTypedDict, + ) + from .libraries_share_create_v1op import ( + LibrariesShareCreateV1Request, + LibrariesShareCreateV1RequestTypedDict, + ) + from .libraries_share_delete_v1op import ( + LibrariesShareDeleteV1Request, + LibrariesShareDeleteV1RequestTypedDict, + ) + from .libraries_share_list_v1op import ( + LibrariesShareListV1Request, + LibrariesShareListV1RequestTypedDict, + ) + from .libraries_update_v1op import ( + LibrariesUpdateV1Request, + LibrariesUpdateV1RequestTypedDict, + ) + from .library import Library, LibraryTypedDict + from .listbatchjobsresponse import ( + ListBatchJobsResponse, + ListBatchJobsResponseTypedDict, + ) + from .listdocumentsresponse import ( + ListDocumentsResponse, + ListDocumentsResponseTypedDict, + ) + from .listfilesresponse import ListFilesResponse, ListFilesResponseTypedDict + from 
.listfinetuningjobsresponse import ( + ListFineTuningJobsResponse, + ListFineTuningJobsResponseData, + ListFineTuningJobsResponseDataTypedDict, + ListFineTuningJobsResponseTypedDict, + UnknownListFineTuningJobsResponseData, + ) + from .listlibrariesresponse import ( + ListLibrariesResponse, + ListLibrariesResponseTypedDict, + ) from .listsharingout import ListSharingOut, ListSharingOutTypedDict from .messageentries import MessageEntries, MessageEntriesTypedDict from .messageinputcontentchunks import ( @@ -569,10 +679,8 @@ MessageInputEntry, MessageInputEntryContent, MessageInputEntryContentTypedDict, - MessageInputEntryObject, - MessageInputEntryRole, - MessageInputEntryType, MessageInputEntryTypedDict, + Role, ) from .messageoutputcontentchunks import ( MessageOutputContentChunks, @@ -582,43 +690,39 @@ MessageOutputEntry, MessageOutputEntryContent, MessageOutputEntryContentTypedDict, - MessageOutputEntryObject, - MessageOutputEntryRole, - MessageOutputEntryType, MessageOutputEntryTypedDict, ) from .messageoutputevent import ( MessageOutputEvent, MessageOutputEventContent, MessageOutputEventContentTypedDict, - MessageOutputEventRole, MessageOutputEventTypedDict, ) - from .metricout import MetricOut, MetricOutTypedDict + from .metric import Metric, MetricTypedDict from .mistralpromptmode import MistralPromptMode from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modelconversation import ( ModelConversation, - ModelConversationObject, ModelConversationTool, ModelConversationToolTypedDict, ModelConversationTypedDict, + UnknownModelConversationTool, ) from .modellist import ( ModelList, ModelListData, ModelListDataTypedDict, ModelListTypedDict, + UnknownModelListData, ) from .moderationobject import ModerationObject, ModerationObjectTypedDict from .moderationresponse import ModerationResponse, ModerationResponseTypedDict - from .no_response_error import NoResponseError from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict from .ocrrequest import ( - Document, - DocumentTypedDict, + DocumentUnion, + DocumentUnionTypedDict, OCRRequest, OCRRequestTypedDict, TableFormat, @@ -640,6 +744,18 @@ RealtimeTranscriptionErrorDetailMessageTypedDict, RealtimeTranscriptionErrorDetailTypedDict, ) + from .realtimetranscriptioninputaudioappend import ( + RealtimeTranscriptionInputAudioAppend, + RealtimeTranscriptionInputAudioAppendTypedDict, + ) + from .realtimetranscriptioninputaudioend import ( + RealtimeTranscriptionInputAudioEnd, + RealtimeTranscriptionInputAudioEndTypedDict, + ) + from .realtimetranscriptioninputaudioflush import ( + RealtimeTranscriptionInputAudioFlush, + RealtimeTranscriptionInputAudioFlushTypedDict, + ) from .realtimetranscriptionsession import ( RealtimeTranscriptionSession, RealtimeTranscriptionSessionTypedDict, @@ -652,15 +768,15 @@ RealtimeTranscriptionSessionUpdated, RealtimeTranscriptionSessionUpdatedTypedDict, ) - from .referencechunk import ( - ReferenceChunk, - ReferenceChunkType, - ReferenceChunkTypedDict, + from .realtimetranscriptionsessionupdatemessage import ( + RealtimeTranscriptionSessionUpdateMessage, + RealtimeTranscriptionSessionUpdateMessageTypedDict, ) - from .reprocessdocumentop import ( - ReprocessDocumentRequest, - ReprocessDocumentRequestTypedDict, + from .realtimetranscriptionsessionupdatepayload import ( + RealtimeTranscriptionSessionUpdatePayload, + 
RealtimeTranscriptionSessionUpdatePayloadTypedDict, ) + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .requestsource import RequestSource from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict @@ -670,25 +786,14 @@ ResponseStartedEvent, ResponseStartedEventTypedDict, ) - from .responsevalidationerror import ResponseValidationError - from .restartconversationop import ( - RestartConversationRequest, - RestartConversationRequestTypedDict, - ) - from .restartconversationstreamop import ( - RestartConversationStreamRequest, - RestartConversationStreamRequestTypedDict, - ) - from .retrievefileop import RetrieveFileRequest, RetrieveFileRequestTypedDict - from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict - from .retrievemodelop import ( + from .retrieve_model_v1_models_model_id_getop import ( ResponseRetrieveModelV1ModelsModelIDGet, ResponseRetrieveModelV1ModelsModelIDGetTypedDict, - RetrieveModelRequest, - RetrieveModelRequestTypedDict, + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + UnknownResponseRetrieveModelV1ModelsModelIDGet, ) from .sampletype import SampleType - from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .shareenum import ShareEnum from .sharingdelete import SharingDelete, SharingDeleteTypedDict @@ -696,12 +801,6 @@ from .sharingout import SharingOut, SharingOutTypedDict from .source import Source from .ssetypes import SSETypes - from .startfinetuningjobop import ( - StartFineTuningJobRequest, - StartFineTuningJobRequestTypedDict, - StartFineTuningJobResponse, - StartFineTuningJobResponseTypedDict, - ) from .systemmessage import ( SystemMessage, SystemMessageContent, @@ -712,19 +811,24 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) - from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict + from .textchunk import TextChunk, TextChunkTypedDict from .thinkchunk import ( ThinkChunk, - ThinkChunkType, + ThinkChunkThinking, + ThinkChunkThinkingTypedDict, ThinkChunkTypedDict, - Thinking, - ThinkingTypedDict, ) from .timestampgranularity import TimestampGranularity from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict + from .toolcallconfirmation import ( + Confirmation, + ToolCallConfirmation, + ToolCallConfirmationTypedDict, + ) from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum + from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict from .toolexecutiondeltaevent import ( ToolExecutionDeltaEvent, ToolExecutionDeltaEventName, @@ -741,8 +845,6 @@ ToolExecutionEntry, ToolExecutionEntryName, ToolExecutionEntryNameTypedDict, - ToolExecutionEntryObject, - ToolExecutionEntryType, ToolExecutionEntryTypedDict, ) from .toolexecutionstartedevent import ( @@ -755,7 +857,6 @@ ToolFileChunk, ToolFileChunkTool, ToolFileChunkToolTypedDict, - ToolFileChunkType, ToolFileChunkTypedDict, ) from .toolmessage import ( @@ -768,7 +869,6 @@ ToolReferenceChunk, ToolReferenceChunkTool, ToolReferenceChunkToolTypedDict, - ToolReferenceChunkType, ToolReferenceChunkTypedDict, ) from .tooltypes import ToolTypes @@ -779,7 +879,6 @@ ) from .transcriptionsegmentchunk import ( TranscriptionSegmentChunk, - TranscriptionSegmentChunkType, TranscriptionSegmentChunkTypedDict, ) from .transcriptionstreamdone import ( @@ -791,6 +890,7 @@ 
TranscriptionStreamEventsData, TranscriptionStreamEventsDataTypedDict, TranscriptionStreamEventsTypedDict, + UnknownTranscriptionStreamEventsData, ) from .transcriptionstreameventtypes import TranscriptionStreamEventTypes from .transcriptionstreamlanguage import ( @@ -805,34 +905,27 @@ TranscriptionStreamTextDelta, TranscriptionStreamTextDeltaTypedDict, ) - from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutTypedDict - from .unarchivemodelop import UnarchiveModelRequest, UnarchiveModelRequestTypedDict - from .updateagentop import UpdateAgentRequest, UpdateAgentRequestTypedDict - from .updateagentversionop import ( - UpdateAgentVersionRequest, - UpdateAgentVersionRequestTypedDict, - ) - from .updatedocumentop import UpdateDocumentRequest, UpdateDocumentRequestTypedDict - from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict - from .updatelibraryop import UpdateLibraryRequest, UpdateLibraryRequestTypedDict - from .updatemodelop import ( - UpdateModelRequest, - UpdateModelRequestTypedDict, - UpdateModelResponse, - UpdateModelResponseTypedDict, - ) - from .updateorcreatelibraryaccessop import ( - UpdateOrCreateLibraryAccessRequest, - UpdateOrCreateLibraryAccessRequestTypedDict, - ) - from .uploaddocumentop import ( - DocumentUpload, - DocumentUploadTypedDict, - UploadDocumentRequest, - UploadDocumentRequestTypedDict, + from .unarchivemodelresponse import ( + UnarchiveModelResponse, + UnarchiveModelResponseTypedDict, + ) + from .updateagentrequest import ( + UpdateAgentRequest, + UpdateAgentRequestTool, + UpdateAgentRequestToolTypedDict, + UpdateAgentRequestTypedDict, + ) + from .updatedocumentrequest import ( + Attributes, + AttributesTypedDict, + UpdateDocumentRequest, + UpdateDocumentRequestTypedDict, ) - from .uploadfileop import MultiPartBodyParams, MultiPartBodyParamsTypedDict - from .uploadfileout import UploadFileOut, UploadFileOutTypedDict + from .updatelibraryrequest import ( + UpdateLibraryRequest, + UpdateLibraryRequestTypedDict, + ) + from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import ( UserMessage, @@ -847,7 +940,10 @@ ValidationErrorTypedDict, ) from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict - from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict + from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, + ) from .websearchpremiumtool import ( WebSearchPremiumTool, WebSearchPremiumToolTypedDict, @@ -862,28 +958,58 @@ "AgentConversation", "AgentConversationAgentVersion", "AgentConversationAgentVersionTypedDict", - "AgentConversationObject", "AgentConversationTypedDict", - "AgentCreationRequest", - "AgentCreationRequestTool", - "AgentCreationRequestToolTypedDict", - "AgentCreationRequestTypedDict", "AgentHandoffDoneEvent", "AgentHandoffDoneEventTypedDict", "AgentHandoffEntry", - "AgentHandoffEntryObject", - "AgentHandoffEntryType", "AgentHandoffEntryTypedDict", "AgentHandoffStartedEvent", "AgentHandoffStartedEventTypedDict", - "AgentObject", "AgentTool", "AgentToolTypedDict", "AgentTypedDict", - "AgentUpdateRequest", - "AgentUpdateRequestTool", - "AgentUpdateRequestToolTypedDict", - "AgentUpdateRequestTypedDict", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteAliasRequest", + "AgentsAPIV1AgentsDeleteAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteRequest", 
+ "AgentsAPIV1AgentsDeleteRequestTypedDict", + "AgentsAPIV1AgentsGetAgentVersion", + "AgentsAPIV1AgentsGetAgentVersionTypedDict", + "AgentsAPIV1AgentsGetRequest", + "AgentsAPIV1AgentsGetRequestTypedDict", + "AgentsAPIV1AgentsGetVersionRequest", + "AgentsAPIV1AgentsGetVersionRequestTypedDict", + "AgentsAPIV1AgentsListRequest", + "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionAliasesRequest", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", + "AgentsAPIV1AgentsListVersionsRequest", + "AgentsAPIV1AgentsListVersionsRequestTypedDict", + "AgentsAPIV1AgentsUpdateRequest", + "AgentsAPIV1AgentsUpdateRequestTypedDict", + "AgentsAPIV1AgentsUpdateVersionRequest", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", + "AgentsAPIV1ConversationsAppendRequest", + "AgentsAPIV1ConversationsAppendRequestTypedDict", + "AgentsAPIV1ConversationsAppendStreamRequest", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsDeleteRequest", + "AgentsAPIV1ConversationsDeleteRequestTypedDict", + "AgentsAPIV1ConversationsGetRequest", + "AgentsAPIV1ConversationsGetRequestTypedDict", + "AgentsAPIV1ConversationsHistoryRequest", + "AgentsAPIV1ConversationsHistoryRequestTypedDict", + "AgentsAPIV1ConversationsListRequest", + "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsListResponse", + "AgentsAPIV1ConversationsListResponseTypedDict", + "AgentsAPIV1ConversationsMessagesRequest", + "AgentsAPIV1ConversationsMessagesRequestTypedDict", + "AgentsAPIV1ConversationsRestartRequest", + "AgentsAPIV1ConversationsRestartRequestTypedDict", + "AgentsAPIV1ConversationsRestartStreamRequest", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", "AgentsCompletionRequest", "AgentsCompletionRequestMessage", "AgentsCompletionRequestMessageTypedDict", @@ -900,20 +1026,13 @@ "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestToolChoiceTypedDict", "AgentsCompletionStreamRequestTypedDict", - "AppendConversationRequest", - "AppendConversationRequestTypedDict", - "AppendConversationStreamRequest", - "AppendConversationStreamRequestTypedDict", - "ArchiveFTModelOut", - "ArchiveFTModelOutTypedDict", - "ArchiveModelRequest", - "ArchiveModelRequestTypedDict", + "ArchiveModelResponse", + "ArchiveModelResponseTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentTypedDict", - "AssistantMessageRole", "AssistantMessageTypedDict", "Attributes", "AttributesTypedDict", @@ -930,22 +1049,12 @@ "BaseModelCardTypedDict", "BatchError", "BatchErrorTypedDict", - "BatchJobIn", - "BatchJobInTypedDict", - "BatchJobOut", - "BatchJobOutTypedDict", + "BatchJob", "BatchJobStatus", - "BatchJobsOut", - "BatchJobsOutTypedDict", + "BatchJobTypedDict", "BatchRequest", "BatchRequestTypedDict", "BuiltInConnectors", - "CancelBatchJobRequest", - "CancelBatchJobRequestTypedDict", - "CancelFineTuningJobRequest", - "CancelFineTuningJobRequestTypedDict", - "CancelFineTuningJobResponse", - "CancelFineTuningJobResponseTypedDict", "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", @@ -977,8 +1086,8 @@ "ChatModerationRequestInputs3", "ChatModerationRequestInputs3TypedDict", "ChatModerationRequestTypedDict", - "CheckpointOut", - "CheckpointOutTypedDict", + "Checkpoint", + "CheckpointTypedDict", "ClassificationRequest", "ClassificationRequestInputs", "ClassificationRequestInputsTypedDict", @@ -987,25 +1096,23 @@ "ClassificationResponseTypedDict", "ClassificationTargetResult", 
"ClassificationTargetResultTypedDict", - "ClassifierDetailedJobOut", - "ClassifierDetailedJobOutIntegration", - "ClassifierDetailedJobOutIntegrationTypedDict", - "ClassifierDetailedJobOutStatus", - "ClassifierDetailedJobOutTypedDict", - "ClassifierFTModelOut", - "ClassifierFTModelOutTypedDict", - "ClassifierJobOut", - "ClassifierJobOutIntegration", - "ClassifierJobOutIntegrationTypedDict", - "ClassifierJobOutStatus", - "ClassifierJobOutTypedDict", - "ClassifierTargetIn", - "ClassifierTargetInTypedDict", - "ClassifierTargetOut", - "ClassifierTargetOutTypedDict", + "ClassifierFineTunedModel", + "ClassifierFineTunedModelTypedDict", + "ClassifierFineTuningJob", + "ClassifierFineTuningJobDetails", + "ClassifierFineTuningJobDetailsIntegration", + "ClassifierFineTuningJobDetailsIntegrationTypedDict", + "ClassifierFineTuningJobDetailsStatus", + "ClassifierFineTuningJobDetailsTypedDict", + "ClassifierFineTuningJobIntegration", + "ClassifierFineTuningJobIntegrationTypedDict", + "ClassifierFineTuningJobStatus", + "ClassifierFineTuningJobTypedDict", + "ClassifierTarget", + "ClassifierTargetResult", + "ClassifierTargetResultTypedDict", + "ClassifierTargetTypedDict", "ClassifierTrainingParameters", - "ClassifierTrainingParametersIn", - "ClassifierTrainingParametersInTypedDict", "ClassifierTrainingParametersTypedDict", "CodeInterpreterTool", "CodeInterpreterToolTypedDict", @@ -1015,31 +1122,30 @@ "CompletionArgsTypedDict", "CompletionChunk", "CompletionChunkTypedDict", - "CompletionDetailedJobOut", - "CompletionDetailedJobOutIntegration", - "CompletionDetailedJobOutIntegrationTypedDict", - "CompletionDetailedJobOutRepository", - "CompletionDetailedJobOutRepositoryTypedDict", - "CompletionDetailedJobOutStatus", - "CompletionDetailedJobOutTypedDict", "CompletionEvent", "CompletionEventTypedDict", - "CompletionFTModelOut", - "CompletionFTModelOutTypedDict", - "CompletionJobOut", - "CompletionJobOutIntegration", - "CompletionJobOutIntegrationTypedDict", - "CompletionJobOutRepository", - "CompletionJobOutRepositoryTypedDict", - "CompletionJobOutStatus", - "CompletionJobOutTypedDict", + "CompletionFineTunedModel", + "CompletionFineTunedModelTypedDict", + "CompletionFineTuningJob", + "CompletionFineTuningJobDetails", + "CompletionFineTuningJobDetailsIntegration", + "CompletionFineTuningJobDetailsIntegrationTypedDict", + "CompletionFineTuningJobDetailsRepository", + "CompletionFineTuningJobDetailsRepositoryTypedDict", + "CompletionFineTuningJobDetailsStatus", + "CompletionFineTuningJobDetailsTypedDict", + "CompletionFineTuningJobIntegration", + "CompletionFineTuningJobIntegrationTypedDict", + "CompletionFineTuningJobRepository", + "CompletionFineTuningJobRepositoryTypedDict", + "CompletionFineTuningJobStatus", + "CompletionFineTuningJobTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "CompletionTrainingParameters", - "CompletionTrainingParametersIn", - "CompletionTrainingParametersInTypedDict", "CompletionTrainingParametersTypedDict", + "Confirmation", "ContentChunk", "ContentChunkTypedDict", "ConversationAppendRequest", @@ -1053,12 +1159,10 @@ "ConversationEventsDataTypedDict", "ConversationEventsTypedDict", "ConversationHistory", - "ConversationHistoryObject", "ConversationHistoryTypedDict", "ConversationInputs", "ConversationInputsTypedDict", "ConversationMessages", - "ConversationMessagesObject", "ConversationMessagesTypedDict", "ConversationRequest", "ConversationRequestAgentVersion", @@ -1068,7 +1172,6 @@ 
"ConversationRequestToolTypedDict", "ConversationRequestTypedDict", "ConversationResponse", - "ConversationResponseObject", "ConversationResponseTypedDict", "ConversationRestartRequest", "ConversationRestartRequestAgentVersion", @@ -1087,32 +1190,34 @@ "ConversationStreamRequestTool", "ConversationStreamRequestToolTypedDict", "ConversationStreamRequestTypedDict", + "ConversationThinkChunk", + "ConversationThinkChunkThinking", + "ConversationThinkChunkThinkingTypedDict", + "ConversationThinkChunkTypedDict", "ConversationUsageInfo", "ConversationUsageInfoTypedDict", - "CreateFineTuningJobResponse", - "CreateFineTuningJobResponseTypedDict", - "CreateOrUpdateAgentAliasRequest", - "CreateOrUpdateAgentAliasRequestTypedDict", - "DeleteAgentAliasRequest", - "DeleteAgentAliasRequestTypedDict", - "DeleteAgentRequest", - "DeleteAgentRequestTypedDict", - "DeleteConversationRequest", - "DeleteConversationRequestTypedDict", - "DeleteDocumentRequest", - "DeleteDocumentRequestTypedDict", - "DeleteFileOut", - "DeleteFileOutTypedDict", - "DeleteFileRequest", - "DeleteFileRequestTypedDict", - "DeleteLibraryAccessRequest", - "DeleteLibraryAccessRequestTypedDict", - "DeleteLibraryRequest", - "DeleteLibraryRequestTypedDict", + "CreateAgentRequest", + "CreateAgentRequestTool", + "CreateAgentRequestToolTypedDict", + "CreateAgentRequestTypedDict", + "CreateBatchJobRequest", + "CreateBatchJobRequestTypedDict", + "CreateFileResponse", + "CreateFileResponseTypedDict", + "CreateFineTuningJobRequest", + "CreateFineTuningJobRequestIntegration", + "CreateFineTuningJobRequestIntegrationTypedDict", + "CreateFineTuningJobRequestRepository", + "CreateFineTuningJobRequestRepositoryTypedDict", + "CreateFineTuningJobRequestTypedDict", + "CreateLibraryRequest", + "CreateLibraryRequestTypedDict", + "DeleteFileResponse", + "DeleteFileResponseTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", - "DeleteModelRequest", - "DeleteModelRequestTypedDict", + "DeleteModelV1ModelsModelIDDeleteRequest", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageContent", "DeltaMessageContentTypedDict", @@ -1120,20 +1225,15 @@ "Document", "DocumentLibraryTool", "DocumentLibraryToolTypedDict", - "DocumentOut", - "DocumentOutTypedDict", "DocumentTextContent", "DocumentTextContentTypedDict", "DocumentTypedDict", "DocumentURLChunk", - "DocumentURLChunkType", "DocumentURLChunkTypedDict", - "DocumentUpdateIn", - "DocumentUpdateInTypedDict", + "DocumentUnion", + "DocumentUnionTypedDict", "DocumentUpload", "DocumentUploadTypedDict", - "DownloadFileRequest", - "DownloadFileRequestTypedDict", "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", @@ -1147,8 +1247,8 @@ "EntityType", "Entry", "EntryTypedDict", - "EventOut", - "EventOutTypedDict", + "Event", + "EventTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", @@ -1160,8 +1260,6 @@ "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTClassifierLossFunction", - "FTModelCapabilitiesOut", - "FTModelCapabilitiesOutTypedDict", "FTModelCard", "FTModelCardTypedDict", "File", @@ -1170,76 +1268,54 @@ "FilePurpose", "FileSchema", "FileSchemaTypedDict", - "FileSignedURL", - "FileSignedURLTypedDict", "FileTypedDict", + "FilesAPIRoutesDeleteFileRequest", + "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesDownloadFileRequest", + "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesGetSignedURLRequest", + "FilesAPIRoutesGetSignedURLRequestTypedDict", + 
"FilesAPIRoutesListFilesRequest", + "FilesAPIRoutesListFilesRequestTypedDict", + "FilesAPIRoutesRetrieveFileRequest", + "FilesAPIRoutesRetrieveFileRequestTypedDict", "FineTuneableModelType", + "FineTunedModelCapabilities", + "FineTunedModelCapabilitiesTypedDict", "Format", "Function", "FunctionCall", "FunctionCallEntry", "FunctionCallEntryArguments", "FunctionCallEntryArgumentsTypedDict", - "FunctionCallEntryObject", - "FunctionCallEntryType", + "FunctionCallEntryConfirmationStatus", "FunctionCallEntryTypedDict", "FunctionCallEvent", + "FunctionCallEventConfirmationStatus", "FunctionCallEventTypedDict", "FunctionCallTypedDict", "FunctionName", "FunctionNameTypedDict", "FunctionResultEntry", - "FunctionResultEntryObject", - "FunctionResultEntryType", "FunctionResultEntryTypedDict", "FunctionTool", "FunctionToolTypedDict", "FunctionTypedDict", - "GetAgentAgentVersion", - "GetAgentAgentVersionTypedDict", - "GetAgentRequest", - "GetAgentRequestTypedDict", - "GetAgentVersionRequest", - "GetAgentVersionRequestTypedDict", - "GetBatchJobRequest", - "GetBatchJobRequestTypedDict", - "GetConversationHistoryRequest", - "GetConversationHistoryRequestTypedDict", - "GetConversationMessagesRequest", - "GetConversationMessagesRequestTypedDict", - "GetConversationRequest", - "GetConversationRequestTypedDict", - "GetDocumentExtractedTextSignedURLRequest", - "GetDocumentExtractedTextSignedURLRequestTypedDict", - "GetDocumentRequest", - "GetDocumentRequestTypedDict", - "GetDocumentSignedURLRequest", - "GetDocumentSignedURLRequestTypedDict", - "GetDocumentStatusRequest", - "GetDocumentStatusRequestTypedDict", - "GetDocumentTextContentRequest", - "GetDocumentTextContentRequestTypedDict", - "GetFileSignedURLRequest", - "GetFileSignedURLRequestTypedDict", - "GetFineTuningJobRequest", - "GetFineTuningJobRequestTypedDict", - "GetFineTuningJobResponse", - "GetFineTuningJobResponseTypedDict", - "GetLibraryRequest", - "GetLibraryRequestTypedDict", + "GetFileResponse", + "GetFileResponseTypedDict", + "GetSignedURLResponse", + "GetSignedURLResponseTypedDict", + "GithubRepository", "GithubRepositoryIn", "GithubRepositoryInTypedDict", - "GithubRepositoryOut", - "GithubRepositoryOutTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", + "GithubRepositoryTypedDict", "Hyperparameters", "HyperparametersTypedDict", + "ImageDetail", "ImageGenerationTool", "ImageGenerationToolTypedDict", "ImageURL", "ImageURLChunk", - "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", "ImageURLUnion", @@ -1247,64 +1323,94 @@ "InputEntries", "InputEntriesTypedDict", "Inputs", - "InputsMessage", - "InputsMessageTypedDict", "InputsTypedDict", "InstructRequest", - "InstructRequestInputs", - "InstructRequestInputsTypedDict", "InstructRequestMessage", "InstructRequestMessageTypedDict", "InstructRequestTypedDict", "JSONSchema", "JSONSchemaTypedDict", - "JobIn", - "JobInIntegration", - "JobInIntegrationTypedDict", - "JobInRepository", - "JobInRepositoryTypedDict", - "JobInTypedDict", - "JobMetadataOut", - "JobMetadataOutTypedDict", - "JobsOut", - "JobsOutData", - "JobsOutDataTypedDict", - "JobsOutTypedDict", - "LegacyJobMetadataOut", - "LegacyJobMetadataOutTypedDict", - "LibraryIn", - "LibraryInTypedDict", - "LibraryInUpdate", - "LibraryInUpdateTypedDict", - "LibraryOut", - "LibraryOutTypedDict", - "ListAgentAliasesRequest", - "ListAgentAliasesRequestTypedDict", - "ListAgentVersionsRequest", - "ListAgentVersionsRequestTypedDict", - "ListAgentsRequest", - "ListAgentsRequestTypedDict", - "ListBatchJobsRequest", - 
"ListBatchJobsRequestTypedDict", - "ListConversationsRequest", - "ListConversationsRequestTypedDict", - "ListConversationsResponse", - "ListConversationsResponseTypedDict", - "ListDocumentOut", - "ListDocumentOutTypedDict", - "ListDocumentsRequest", - "ListDocumentsRequestTypedDict", - "ListFilesOut", - "ListFilesOutTypedDict", - "ListFilesRequest", - "ListFilesRequestTypedDict", - "ListFineTuningJobsRequest", - "ListFineTuningJobsRequestTypedDict", - "ListFineTuningJobsStatus", - "ListLibraryAccessesRequest", - "ListLibraryAccessesRequestTypedDict", - "ListLibraryOut", - "ListLibraryOutTypedDict", + "JobMetadata", + "JobMetadataTypedDict", + "JobsAPIRoutesBatchCancelBatchJobRequest", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobRequest", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobsRequest", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + "LegacyJobMetadata", + "LegacyJobMetadataTypedDict", + "LibrariesDeleteV1Request", + "LibrariesDeleteV1RequestTypedDict", + "LibrariesDocumentsDeleteV1Request", + "LibrariesDocumentsDeleteV1RequestTypedDict", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetSignedURLV1Request", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetStatusV1Request", + "LibrariesDocumentsGetStatusV1RequestTypedDict", + "LibrariesDocumentsGetTextContentV1Request", + "LibrariesDocumentsGetTextContentV1RequestTypedDict", + "LibrariesDocumentsGetV1Request", + "LibrariesDocumentsGetV1RequestTypedDict", + "LibrariesDocumentsListV1Request", + "LibrariesDocumentsListV1RequestTypedDict", + "LibrariesDocumentsReprocessV1Request", + "LibrariesDocumentsReprocessV1RequestTypedDict", + "LibrariesDocumentsUpdateV1Request", + "LibrariesDocumentsUpdateV1RequestTypedDict", + "LibrariesDocumentsUploadV1Request", + "LibrariesDocumentsUploadV1RequestTypedDict", + "LibrariesGetV1Request", + "LibrariesGetV1RequestTypedDict", + 
"LibrariesShareCreateV1Request", + "LibrariesShareCreateV1RequestTypedDict", + "LibrariesShareDeleteV1Request", + "LibrariesShareDeleteV1RequestTypedDict", + "LibrariesShareListV1Request", + "LibrariesShareListV1RequestTypedDict", + "LibrariesUpdateV1Request", + "LibrariesUpdateV1RequestTypedDict", + "Library", + "LibraryTypedDict", + "ListBatchJobsResponse", + "ListBatchJobsResponseTypedDict", + "ListDocumentsResponse", + "ListDocumentsResponseTypedDict", + "ListFilesResponse", + "ListFilesResponseTypedDict", + "ListFineTuningJobsResponse", + "ListFineTuningJobsResponseData", + "ListFineTuningJobsResponseDataTypedDict", + "ListFineTuningJobsResponseTypedDict", + "ListLibrariesResponse", + "ListLibrariesResponseTypedDict", "ListSharingOut", "ListSharingOutTypedDict", "Loc", @@ -1316,32 +1422,23 @@ "MessageInputEntry", "MessageInputEntryContent", "MessageInputEntryContentTypedDict", - "MessageInputEntryObject", - "MessageInputEntryRole", - "MessageInputEntryType", "MessageInputEntryTypedDict", "MessageOutputContentChunks", "MessageOutputContentChunksTypedDict", "MessageOutputEntry", "MessageOutputEntryContent", "MessageOutputEntryContentTypedDict", - "MessageOutputEntryObject", - "MessageOutputEntryRole", - "MessageOutputEntryType", "MessageOutputEntryTypedDict", "MessageOutputEvent", "MessageOutputEventContent", "MessageOutputEventContentTypedDict", - "MessageOutputEventRole", "MessageOutputEventTypedDict", - "MetricOut", - "MetricOutTypedDict", - "MistralError", + "Metric", + "MetricTypedDict", "MistralPromptMode", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelConversation", - "ModelConversationObject", "ModelConversationTool", "ModelConversationToolTypedDict", "ModelConversationTypedDict", @@ -1355,7 +1452,6 @@ "ModerationResponseTypedDict", "MultiPartBodyParams", "MultiPartBodyParamsTypedDict", - "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -1387,17 +1483,24 @@ "RealtimeTranscriptionErrorDetailMessageTypedDict", "RealtimeTranscriptionErrorDetailTypedDict", "RealtimeTranscriptionErrorTypedDict", + "RealtimeTranscriptionInputAudioAppend", + "RealtimeTranscriptionInputAudioAppendTypedDict", + "RealtimeTranscriptionInputAudioEnd", + "RealtimeTranscriptionInputAudioEndTypedDict", + "RealtimeTranscriptionInputAudioFlush", + "RealtimeTranscriptionInputAudioFlushTypedDict", "RealtimeTranscriptionSession", "RealtimeTranscriptionSessionCreated", "RealtimeTranscriptionSessionCreatedTypedDict", "RealtimeTranscriptionSessionTypedDict", + "RealtimeTranscriptionSessionUpdateMessage", + "RealtimeTranscriptionSessionUpdateMessageTypedDict", + "RealtimeTranscriptionSessionUpdatePayload", + "RealtimeTranscriptionSessionUpdatePayloadTypedDict", "RealtimeTranscriptionSessionUpdated", "RealtimeTranscriptionSessionUpdatedTypedDict", "ReferenceChunk", - "ReferenceChunkType", "ReferenceChunkTypedDict", - "ReprocessDocumentRequest", - "ReprocessDocumentRequestTypedDict", "RequestSource", "Response", "ResponseDoneEvent", @@ -1414,18 +1517,9 @@ "ResponseTypedDict", "ResponseV1ConversationsGet", "ResponseV1ConversationsGetTypedDict", - "ResponseValidationError", - "RestartConversationRequest", - "RestartConversationRequestTypedDict", - "RestartConversationStreamRequest", - "RestartConversationStreamRequestTypedDict", - "RetrieveFileOut", - "RetrieveFileOutTypedDict", - "RetrieveFileRequest", - "RetrieveFileRequestTypedDict", - "RetrieveModelRequest", - "RetrieveModelRequestTypedDict", - "SDKError", + "RetrieveModelV1ModelsModelIDGetRequest", + 
"RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "Role", "SSETypes", "SampleType", "Security", @@ -1438,10 +1532,6 @@ "SharingOut", "SharingOutTypedDict", "Source", - "StartFineTuningJobRequest", - "StartFineTuningJobRequestTypedDict", - "StartFineTuningJobResponse", - "StartFineTuningJobResponseTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -1450,20 +1540,22 @@ "SystemMessageTypedDict", "TableFormat", "TextChunk", - "TextChunkType", "TextChunkTypedDict", "ThinkChunk", - "ThinkChunkType", + "ThinkChunkThinking", + "ThinkChunkThinkingTypedDict", "ThinkChunkTypedDict", - "Thinking", - "ThinkingTypedDict", "TimestampGranularity", "Tool", "ToolCall", + "ToolCallConfirmation", + "ToolCallConfirmationTypedDict", "ToolCallTypedDict", "ToolChoice", "ToolChoiceEnum", "ToolChoiceTypedDict", + "ToolConfiguration", + "ToolConfigurationTypedDict", "ToolExecutionDeltaEvent", "ToolExecutionDeltaEventName", "ToolExecutionDeltaEventNameTypedDict", @@ -1475,8 +1567,6 @@ "ToolExecutionEntry", "ToolExecutionEntryName", "ToolExecutionEntryNameTypedDict", - "ToolExecutionEntryObject", - "ToolExecutionEntryType", "ToolExecutionEntryTypedDict", "ToolExecutionStartedEvent", "ToolExecutionStartedEventName", @@ -1485,7 +1575,6 @@ "ToolFileChunk", "ToolFileChunkTool", "ToolFileChunkToolTypedDict", - "ToolFileChunkType", "ToolFileChunkTypedDict", "ToolMessage", "ToolMessageContent", @@ -1494,7 +1583,6 @@ "ToolReferenceChunk", "ToolReferenceChunkTool", "ToolReferenceChunkToolTypedDict", - "ToolReferenceChunkType", "ToolReferenceChunkTypedDict", "ToolTypedDict", "ToolTypes", @@ -1503,7 +1591,6 @@ "TranscriptionResponse", "TranscriptionResponseTypedDict", "TranscriptionSegmentChunk", - "TranscriptionSegmentChunkType", "TranscriptionSegmentChunkTypedDict", "TranscriptionStreamDone", "TranscriptionStreamDoneTypedDict", @@ -1518,30 +1605,37 @@ "TranscriptionStreamSegmentDeltaTypedDict", "TranscriptionStreamTextDelta", "TranscriptionStreamTextDeltaTypedDict", - "UnarchiveFTModelOut", - "UnarchiveFTModelOutTypedDict", - "UnarchiveModelRequest", - "UnarchiveModelRequestTypedDict", + "UnarchiveModelResponse", + "UnarchiveModelResponseTypedDict", + "UnknownAgentTool", + "UnknownClassifierFineTuningJobDetailsIntegration", + "UnknownClassifierFineTuningJobIntegration", + "UnknownCompletionFineTuningJobDetailsIntegration", + "UnknownCompletionFineTuningJobDetailsRepository", + "UnknownCompletionFineTuningJobIntegration", + "UnknownCompletionFineTuningJobRepository", + "UnknownContentChunk", + "UnknownConversationEventsData", + "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "UnknownListFineTuningJobsResponseData", + "UnknownModelConversationTool", + "UnknownModelListData", + "UnknownResponse", + "UnknownResponseRetrieveModelV1ModelsModelIDGet", + "UnknownTranscriptionStreamEventsData", "UpdateAgentRequest", + "UpdateAgentRequestTool", + "UpdateAgentRequestToolTypedDict", "UpdateAgentRequestTypedDict", - "UpdateAgentVersionRequest", - "UpdateAgentVersionRequestTypedDict", "UpdateDocumentRequest", "UpdateDocumentRequestTypedDict", - "UpdateFTModelIn", - "UpdateFTModelInTypedDict", "UpdateLibraryRequest", "UpdateLibraryRequestTypedDict", "UpdateModelRequest", "UpdateModelRequestTypedDict", - "UpdateModelResponse", - "UpdateModelResponseTypedDict", - "UpdateOrCreateLibraryAccessRequest", - 
"UpdateOrCreateLibraryAccessRequestTypedDict", - "UploadDocumentRequest", - "UploadDocumentRequestTypedDict", - "UploadFileOut", - "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", @@ -1551,8 +1645,8 @@ "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", - "WandbIntegrationOut", - "WandbIntegrationOutTypedDict", + "WandbIntegrationResult", + "WandbIntegrationResultTypedDict", "WandbIntegrationTypedDict", "WebSearchPremiumTool", "WebSearchPremiumToolTypedDict", @@ -1562,29 +1656,66 @@ _dynamic_imports: dict[str, str] = { "Agent": ".agent", - "AgentObject": ".agent", "AgentTool": ".agent", "AgentToolTypedDict": ".agent", "AgentTypedDict": ".agent", + "UnknownAgentTool": ".agent", "AgentAliasResponse": ".agentaliasresponse", "AgentAliasResponseTypedDict": ".agentaliasresponse", "AgentConversation": ".agentconversation", "AgentConversationAgentVersion": ".agentconversation", "AgentConversationAgentVersionTypedDict": ".agentconversation", - "AgentConversationObject": ".agentconversation", "AgentConversationTypedDict": ".agentconversation", - "AgentCreationRequest": ".agentcreationrequest", - "AgentCreationRequestTool": ".agentcreationrequest", - "AgentCreationRequestToolTypedDict": ".agentcreationrequest", - "AgentCreationRequestTypedDict": ".agentcreationrequest", "AgentHandoffDoneEvent": ".agenthandoffdoneevent", "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", "AgentHandoffEntry": ".agenthandoffentry", - "AgentHandoffEntryObject": ".agenthandoffentry", - "AgentHandoffEntryType": ".agenthandoffentry", "AgentHandoffEntryTypedDict": ".agenthandoffentry", "AgentHandoffStartedEvent": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsDeleteAliasRequest": ".agents_api_v1_agents_delete_aliasop", + "AgentsAPIV1AgentsDeleteAliasRequestTypedDict": ".agents_api_v1_agents_delete_aliasop", + "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetAgentVersion": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetAgentVersionTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", + "AgentsAPIV1AgentsUpdateRequestTypedDict": 
".agents_api_v1_agents_updateop", + "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponse": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponseTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", + "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", "AgentsCompletionRequest": ".agentscompletionrequest", "AgentsCompletionRequestMessage": ".agentscompletionrequest", "AgentsCompletionRequestMessageTypedDict": ".agentscompletionrequest", @@ -1601,23 +1732,12 @@ "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", - "AgentUpdateRequest": ".agentupdaterequest", - "AgentUpdateRequestTool": ".agentupdaterequest", - "AgentUpdateRequestToolTypedDict": ".agentupdaterequest", - "AgentUpdateRequestTypedDict": ".agentupdaterequest", "APIEndpoint": ".apiendpoint", - "AppendConversationRequest": ".appendconversationop", - "AppendConversationRequestTypedDict": ".appendconversationop", - "AppendConversationStreamRequest": ".appendconversationstreamop", - "AppendConversationStreamRequestTypedDict": ".appendconversationstreamop", - "ArchiveFTModelOut": ".archiveftmodelout", - "ArchiveFTModelOutTypedDict": ".archiveftmodelout", - "ArchiveModelRequest": ".archivemodelop", - "ArchiveModelRequestTypedDict": ".archivemodelop", + "ArchiveModelResponse": ".archivemodelresponse", + "ArchiveModelResponseTypedDict": ".archivemodelresponse", "AssistantMessage": ".assistantmessage", "AssistantMessageContent": ".assistantmessage", "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", "AudioChunk": ".audiochunk", 
"AudioChunkTypedDict": ".audiochunk", @@ -1632,22 +1752,12 @@ "BaseModelCardTypedDict": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", - "BatchJobIn": ".batchjobin", - "BatchJobInTypedDict": ".batchjobin", - "BatchJobOut": ".batchjobout", - "BatchJobOutTypedDict": ".batchjobout", - "BatchJobsOut": ".batchjobsout", - "BatchJobsOutTypedDict": ".batchjobsout", + "BatchJob": ".batchjob", + "BatchJobTypedDict": ".batchjob", "BatchJobStatus": ".batchjobstatus", "BatchRequest": ".batchrequest", "BatchRequestTypedDict": ".batchrequest", "BuiltInConnectors": ".builtinconnectors", - "CancelBatchJobRequest": ".cancelbatchjobop", - "CancelBatchJobRequestTypedDict": ".cancelbatchjobop", - "CancelFineTuningJobRequest": ".cancelfinetuningjobop", - "CancelFineTuningJobRequestTypedDict": ".cancelfinetuningjobop", - "CancelFineTuningJobResponse": ".cancelfinetuningjobop", - "CancelFineTuningJobResponseTypedDict": ".cancelfinetuningjobop", "ChatClassificationRequest": ".chatclassificationrequest", "ChatClassificationRequestTypedDict": ".chatclassificationrequest", "ChatCompletionChoice": ".chatcompletionchoice", @@ -1679,8 +1789,8 @@ "ChatModerationRequestInputs3": ".chatmoderationrequest", "ChatModerationRequestInputs3TypedDict": ".chatmoderationrequest", "ChatModerationRequestTypedDict": ".chatmoderationrequest", - "CheckpointOut": ".checkpointout", - "CheckpointOutTypedDict": ".checkpointout", + "Checkpoint": ".checkpoint", + "CheckpointTypedDict": ".checkpoint", "ClassificationRequest": ".classificationrequest", "ClassificationRequestInputs": ".classificationrequest", "ClassificationRequestInputsTypedDict": ".classificationrequest", @@ -1689,26 +1799,26 @@ "ClassificationResponseTypedDict": ".classificationresponse", "ClassificationTargetResult": ".classificationtargetresult", "ClassificationTargetResultTypedDict": ".classificationtargetresult", - "ClassifierDetailedJobOut": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegration": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrationTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", - "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", - "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutTypedDict": ".classifierftmodelout", - "ClassifierJobOut": ".classifierjobout", - "ClassifierJobOutIntegration": ".classifierjobout", - "ClassifierJobOutIntegrationTypedDict": ".classifierjobout", - "ClassifierJobOutStatus": ".classifierjobout", - "ClassifierJobOutTypedDict": ".classifierjobout", - "ClassifierTargetIn": ".classifiertargetin", - "ClassifierTargetInTypedDict": ".classifiertargetin", - "ClassifierTargetOut": ".classifiertargetout", - "ClassifierTargetOutTypedDict": ".classifiertargetout", + "ClassifierFineTunedModel": ".classifierfinetunedmodel", + "ClassifierFineTunedModelTypedDict": ".classifierfinetunedmodel", + "ClassifierFineTuningJob": ".classifierfinetuningjob", + "ClassifierFineTuningJobIntegration": ".classifierfinetuningjob", + "ClassifierFineTuningJobIntegrationTypedDict": ".classifierfinetuningjob", + "ClassifierFineTuningJobStatus": ".classifierfinetuningjob", + "ClassifierFineTuningJobTypedDict": ".classifierfinetuningjob", + "UnknownClassifierFineTuningJobIntegration": ".classifierfinetuningjob", + "ClassifierFineTuningJobDetails": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsIntegration": ".classifierfinetuningjobdetails", + 
"ClassifierFineTuningJobDetailsIntegrationTypedDict": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsStatus": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsTypedDict": ".classifierfinetuningjobdetails", + "UnknownClassifierFineTuningJobDetailsIntegration": ".classifierfinetuningjobdetails", + "ClassifierTarget": ".classifiertarget", + "ClassifierTargetTypedDict": ".classifiertarget", + "ClassifierTargetResult": ".classifiertargetresult", + "ClassifierTargetResultTypedDict": ".classifiertargetresult", "ClassifierTrainingParameters": ".classifiertrainingparameters", "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", - "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", - "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", "CodeInterpreterTool": ".codeinterpretertool", "CodeInterpreterToolTypedDict": ".codeinterpretertool", "CompletionArgs": ".completionargs", @@ -1717,33 +1827,36 @@ "CompletionArgsStopTypedDict": ".completionargsstop", "CompletionChunk": ".completionchunk", "CompletionChunkTypedDict": ".completionchunk", - "CompletionDetailedJobOut": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegration": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrationTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutRepository": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositoryTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutStatus": ".completiondetailedjobout", - "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", "CompletionEvent": ".completionevent", "CompletionEventTypedDict": ".completionevent", - "CompletionFTModelOut": ".completionftmodelout", - "CompletionFTModelOutTypedDict": ".completionftmodelout", - "CompletionJobOut": ".completionjobout", - "CompletionJobOutIntegration": ".completionjobout", - "CompletionJobOutIntegrationTypedDict": ".completionjobout", - "CompletionJobOutRepository": ".completionjobout", - "CompletionJobOutRepositoryTypedDict": ".completionjobout", - "CompletionJobOutStatus": ".completionjobout", - "CompletionJobOutTypedDict": ".completionjobout", + "CompletionFineTunedModel": ".completionfinetunedmodel", + "CompletionFineTunedModelTypedDict": ".completionfinetunedmodel", + "CompletionFineTuningJob": ".completionfinetuningjob", + "CompletionFineTuningJobIntegration": ".completionfinetuningjob", + "CompletionFineTuningJobIntegrationTypedDict": ".completionfinetuningjob", + "CompletionFineTuningJobRepository": ".completionfinetuningjob", + "CompletionFineTuningJobRepositoryTypedDict": ".completionfinetuningjob", + "CompletionFineTuningJobStatus": ".completionfinetuningjob", + "CompletionFineTuningJobTypedDict": ".completionfinetuningjob", + "UnknownCompletionFineTuningJobIntegration": ".completionfinetuningjob", + "UnknownCompletionFineTuningJobRepository": ".completionfinetuningjob", + "CompletionFineTuningJobDetails": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsIntegration": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsIntegrationTypedDict": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsRepository": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsRepositoryTypedDict": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsStatus": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsTypedDict": ".completionfinetuningjobdetails", + 
"UnknownCompletionFineTuningJobDetailsIntegration": ".completionfinetuningjobdetails", + "UnknownCompletionFineTuningJobDetailsRepository": ".completionfinetuningjobdetails", "CompletionResponseStreamChoice": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", "CompletionTrainingParameters": ".completiontrainingparameters", "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", - "CompletionTrainingParametersIn": ".completiontrainingparametersin", - "CompletionTrainingParametersInTypedDict": ".completiontrainingparametersin", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", "ConversationAppendRequest": ".conversationappendrequest", "ConversationAppendRequestHandoffExecution": ".conversationappendrequest", "ConversationAppendRequestTypedDict": ".conversationappendrequest", @@ -1754,15 +1867,14 @@ "ConversationEventsData": ".conversationevents", "ConversationEventsDataTypedDict": ".conversationevents", "ConversationEventsTypedDict": ".conversationevents", + "UnknownConversationEventsData": ".conversationevents", "ConversationHistory": ".conversationhistory", - "ConversationHistoryObject": ".conversationhistory", "ConversationHistoryTypedDict": ".conversationhistory", "Entry": ".conversationhistory", "EntryTypedDict": ".conversationhistory", "ConversationInputs": ".conversationinputs", "ConversationInputsTypedDict": ".conversationinputs", "ConversationMessages": ".conversationmessages", - "ConversationMessagesObject": ".conversationmessages", "ConversationMessagesTypedDict": ".conversationmessages", "ConversationRequest": ".conversationrequest", "ConversationRequestAgentVersion": ".conversationrequest", @@ -1772,7 +1884,6 @@ "ConversationRequestToolTypedDict": ".conversationrequest", "ConversationRequestTypedDict": ".conversationrequest", "ConversationResponse": ".conversationresponse", - "ConversationResponseObject": ".conversationresponse", "ConversationResponseTypedDict": ".conversationresponse", "Output": ".conversationresponse", "OutputTypedDict": ".conversationresponse", @@ -1793,53 +1904,48 @@ "ConversationStreamRequestTool": ".conversationstreamrequest", "ConversationStreamRequestToolTypedDict": ".conversationstreamrequest", "ConversationStreamRequestTypedDict": ".conversationstreamrequest", + "ConversationThinkChunk": ".conversationthinkchunk", + "ConversationThinkChunkThinking": ".conversationthinkchunk", + "ConversationThinkChunkThinkingTypedDict": ".conversationthinkchunk", + "ConversationThinkChunkTypedDict": ".conversationthinkchunk", "ConversationUsageInfo": ".conversationusageinfo", "ConversationUsageInfoTypedDict": ".conversationusageinfo", - "CreateFineTuningJobResponse": ".createfinetuningjobop", - "CreateFineTuningJobResponseTypedDict": ".createfinetuningjobop", - "Response": ".createfinetuningjobop", - "ResponseTypedDict": ".createfinetuningjobop", - "CreateOrUpdateAgentAliasRequest": ".createorupdateagentaliasop", - "CreateOrUpdateAgentAliasRequestTypedDict": ".createorupdateagentaliasop", - "DeleteAgentAliasRequest": ".deleteagentaliasop", - "DeleteAgentAliasRequestTypedDict": ".deleteagentaliasop", - "DeleteAgentRequest": ".deleteagentop", - "DeleteAgentRequestTypedDict": ".deleteagentop", - "DeleteConversationRequest": ".deleteconversationop", - "DeleteConversationRequestTypedDict": ".deleteconversationop", - "DeleteDocumentRequest": 
".deletedocumentop", - "DeleteDocumentRequestTypedDict": ".deletedocumentop", - "DeleteFileRequest": ".deletefileop", - "DeleteFileRequestTypedDict": ".deletefileop", - "DeleteFileOut": ".deletefileout", - "DeleteFileOutTypedDict": ".deletefileout", - "DeleteLibraryAccessRequest": ".deletelibraryaccessop", - "DeleteLibraryAccessRequestTypedDict": ".deletelibraryaccessop", - "DeleteLibraryRequest": ".deletelibraryop", - "DeleteLibraryRequestTypedDict": ".deletelibraryop", - "DeleteModelRequest": ".deletemodelop", - "DeleteModelRequestTypedDict": ".deletemodelop", + "CreateAgentRequest": ".createagentrequest", + "CreateAgentRequestTool": ".createagentrequest", + "CreateAgentRequestToolTypedDict": ".createagentrequest", + "CreateAgentRequestTypedDict": ".createagentrequest", + "CreateBatchJobRequest": ".createbatchjobrequest", + "CreateBatchJobRequestTypedDict": ".createbatchjobrequest", + "CreateFileResponse": ".createfileresponse", + "CreateFileResponseTypedDict": ".createfileresponse", + "CreateFineTuningJobRequest": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestIntegration": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestIntegrationTypedDict": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestRepository": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestRepositoryTypedDict": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestTypedDict": ".createfinetuningjobrequest", + "Hyperparameters": ".createfinetuningjobrequest", + "HyperparametersTypedDict": ".createfinetuningjobrequest", + "CreateLibraryRequest": ".createlibraryrequest", + "CreateLibraryRequestTypedDict": ".createlibraryrequest", + "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "DeleteFileResponse": ".deletefileresponse", + "DeleteFileResponseTypedDict": ".deletefileresponse", "DeleteModelOut": ".deletemodelout", "DeleteModelOutTypedDict": ".deletemodelout", "DeltaMessage": ".deltamessage", "DeltaMessageContent": ".deltamessage", "DeltaMessageContentTypedDict": ".deltamessage", "DeltaMessageTypedDict": ".deltamessage", + "Document": ".document", + "DocumentTypedDict": ".document", "DocumentLibraryTool": ".documentlibrarytool", "DocumentLibraryToolTypedDict": ".documentlibrarytool", - "DocumentOut": ".documentout", - "DocumentOutTypedDict": ".documentout", "DocumentTextContent": ".documenttextcontent", "DocumentTextContentTypedDict": ".documenttextcontent", - "Attributes": ".documentupdatein", - "AttributesTypedDict": ".documentupdatein", - "DocumentUpdateIn": ".documentupdatein", - "DocumentUpdateInTypedDict": ".documentupdatein", "DocumentURLChunk": ".documenturlchunk", - "DocumentURLChunkType": ".documenturlchunk", "DocumentURLChunkTypedDict": ".documenturlchunk", - "DownloadFileRequest": ".downloadfileop", - "DownloadFileRequestTypedDict": ".downloadfileop", "EmbeddingDtype": ".embeddingdtype", "EmbeddingRequest": ".embeddingrequest", "EmbeddingRequestInputs": ".embeddingrequest", @@ -1851,17 +1957,27 @@ "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", "EncodingFormat": ".encodingformat", "EntityType": ".entitytype", - "EventOut": ".eventout", - "EventOutTypedDict": ".eventout", + "Event": ".event", + "EventTypedDict": ".event", "File": ".file", "FileTypedDict": ".file", "FileChunk": ".filechunk", "FileChunkTypedDict": ".filechunk", "FilePurpose": ".filepurpose", + "FilesAPIRoutesDeleteFileRequest": 
".files_api_routes_delete_fileop", + "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", + "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", + "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", + "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", + "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", + "MultiPartBodyParams": ".files_api_routes_upload_fileop", + "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", "FileSchema": ".fileschema", "FileSchemaTypedDict": ".fileschema", - "FileSignedURL": ".filesignedurl", - "FileSignedURLTypedDict": ".filesignedurl", "FIMCompletionRequest": ".fimcompletionrequest", "FIMCompletionRequestStop": ".fimcompletionrequest", "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", @@ -1873,9 +1989,9 @@ "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", "FineTuneableModelType": ".finetuneablemodeltype", + "FineTunedModelCapabilities": ".finetunedmodelcapabilities", + "FineTunedModelCapabilitiesTypedDict": ".finetunedmodelcapabilities", "FTClassifierLossFunction": ".ftclassifierlossfunction", - "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", - "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", "FTModelCard": ".ftmodelcard", "FTModelCardTypedDict": ".ftmodelcard", "Function": ".function", @@ -1885,134 +2001,138 @@ "FunctionCall": ".functioncall", "FunctionCallTypedDict": ".functioncall", "FunctionCallEntry": ".functioncallentry", - "FunctionCallEntryObject": ".functioncallentry", - "FunctionCallEntryType": ".functioncallentry", + "FunctionCallEntryConfirmationStatus": ".functioncallentry", "FunctionCallEntryTypedDict": ".functioncallentry", "FunctionCallEntryArguments": ".functioncallentryarguments", "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", "FunctionCallEvent": ".functioncallevent", + "FunctionCallEventConfirmationStatus": ".functioncallevent", "FunctionCallEventTypedDict": ".functioncallevent", "FunctionName": ".functionname", "FunctionNameTypedDict": ".functionname", "FunctionResultEntry": ".functionresultentry", - "FunctionResultEntryObject": ".functionresultentry", - "FunctionResultEntryType": ".functionresultentry", "FunctionResultEntryTypedDict": ".functionresultentry", "FunctionTool": ".functiontool", "FunctionToolTypedDict": ".functiontool", - "GetAgentAgentVersion": ".getagentop", - "GetAgentAgentVersionTypedDict": ".getagentop", - "GetAgentRequest": ".getagentop", - "GetAgentRequestTypedDict": ".getagentop", - "GetAgentVersionRequest": ".getagentversionop", - "GetAgentVersionRequestTypedDict": ".getagentversionop", - "GetBatchJobRequest": ".getbatchjobop", - "GetBatchJobRequestTypedDict": ".getbatchjobop", - "GetConversationHistoryRequest": ".getconversationhistoryop", - "GetConversationHistoryRequestTypedDict": ".getconversationhistoryop", - "GetConversationMessagesRequest": ".getconversationmessagesop", - "GetConversationMessagesRequestTypedDict": ".getconversationmessagesop", - "GetConversationRequest": ".getconversationop", - 
"GetConversationRequestTypedDict": ".getconversationop", - "ResponseV1ConversationsGet": ".getconversationop", - "ResponseV1ConversationsGetTypedDict": ".getconversationop", - "GetDocumentExtractedTextSignedURLRequest": ".getdocumentextractedtextsignedurlop", - "GetDocumentExtractedTextSignedURLRequestTypedDict": ".getdocumentextractedtextsignedurlop", - "GetDocumentRequest": ".getdocumentop", - "GetDocumentRequestTypedDict": ".getdocumentop", - "GetDocumentSignedURLRequest": ".getdocumentsignedurlop", - "GetDocumentSignedURLRequestTypedDict": ".getdocumentsignedurlop", - "GetDocumentStatusRequest": ".getdocumentstatusop", - "GetDocumentStatusRequestTypedDict": ".getdocumentstatusop", - "GetDocumentTextContentRequest": ".getdocumenttextcontentop", - "GetDocumentTextContentRequestTypedDict": ".getdocumenttextcontentop", - "GetFileSignedURLRequest": ".getfilesignedurlop", - "GetFileSignedURLRequestTypedDict": ".getfilesignedurlop", - "GetFineTuningJobRequest": ".getfinetuningjobop", - "GetFineTuningJobRequestTypedDict": ".getfinetuningjobop", - "GetFineTuningJobResponse": ".getfinetuningjobop", - "GetFineTuningJobResponseTypedDict": ".getfinetuningjobop", - "GetLibraryRequest": ".getlibraryop", - "GetLibraryRequestTypedDict": ".getlibraryop", + "GetFileResponse": ".getfileresponse", + "GetFileResponseTypedDict": ".getfileresponse", + "GetSignedURLResponse": ".getsignedurlresponse", + "GetSignedURLResponseTypedDict": ".getsignedurlresponse", + "GithubRepository": ".githubrepository", + "GithubRepositoryTypedDict": ".githubrepository", "GithubRepositoryIn": ".githubrepositoryin", "GithubRepositoryInTypedDict": ".githubrepositoryin", - "GithubRepositoryOut": ".githubrepositoryout", - "GithubRepositoryOutTypedDict": ".githubrepositoryout", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", + "ImageDetail": ".imagedetail", "ImageGenerationTool": ".imagegenerationtool", "ImageGenerationToolTypedDict": ".imagegenerationtool", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", "ImageURLChunkTypedDict": ".imageurlchunk", "ImageURLUnion": ".imageurlchunk", "ImageURLUnionTypedDict": ".imageurlchunk", "InputEntries": ".inputentries", "InputEntriesTypedDict": ".inputentries", "Inputs": ".inputs", - "InputsMessage": ".inputs", - "InputsMessageTypedDict": ".inputs", "InputsTypedDict": ".inputs", - "InstructRequestInputs": ".inputs", - "InstructRequestInputsTypedDict": ".inputs", "InstructRequest": ".instructrequest", "InstructRequestMessage": ".instructrequest", "InstructRequestMessageTypedDict": ".instructrequest", "InstructRequestTypedDict": ".instructrequest", - "Hyperparameters": ".jobin", - "HyperparametersTypedDict": ".jobin", - "JobIn": ".jobin", - "JobInIntegration": ".jobin", - "JobInIntegrationTypedDict": ".jobin", - "JobInRepository": ".jobin", - "JobInRepositoryTypedDict": ".jobin", - "JobInTypedDict": ".jobin", - "JobMetadataOut": ".jobmetadataout", - "JobMetadataOutTypedDict": ".jobmetadataout", - "JobsOut": ".jobsout", - "JobsOutData": ".jobsout", - "JobsOutDataTypedDict": ".jobsout", - "JobsOutTypedDict": ".jobsout", + "JobMetadata": ".jobmetadata", + "JobMetadataTypedDict": ".jobmetadata", + "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", 
+ "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", + "OrderBy": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "ResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "UnknownResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": 
".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", - "LegacyJobMetadataOut": ".legacyjobmetadataout", - "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", - "LibraryIn": ".libraryin", - "LibraryInTypedDict": ".libraryin", - "LibraryInUpdate": ".libraryinupdate", - "LibraryInUpdateTypedDict": ".libraryinupdate", - "LibraryOut": ".libraryout", - "LibraryOutTypedDict": ".libraryout", - "ListAgentAliasesRequest": ".listagentaliasesop", - "ListAgentAliasesRequestTypedDict": ".listagentaliasesop", - "ListAgentsRequest": ".listagentsop", - "ListAgentsRequestTypedDict": ".listagentsop", - "ListAgentVersionsRequest": ".listagentversionsop", - "ListAgentVersionsRequestTypedDict": ".listagentversionsop", - "ListBatchJobsRequest": ".listbatchjobsop", - "ListBatchJobsRequestTypedDict": ".listbatchjobsop", - "OrderBy": ".listbatchjobsop", - "ListConversationsRequest": ".listconversationsop", - "ListConversationsRequestTypedDict": ".listconversationsop", - "ListConversationsResponse": ".listconversationsop", - "ListConversationsResponseTypedDict": ".listconversationsop", - "ListDocumentOut": ".listdocumentout", - "ListDocumentOutTypedDict": ".listdocumentout", - "ListDocumentsRequest": ".listdocumentsop", - "ListDocumentsRequestTypedDict": ".listdocumentsop", - "ListFilesRequest": ".listfilesop", - "ListFilesRequestTypedDict": ".listfilesop", - "ListFilesOut": ".listfilesout", - "ListFilesOutTypedDict": ".listfilesout", - "ListFineTuningJobsRequest": ".listfinetuningjobsop", - "ListFineTuningJobsRequestTypedDict": ".listfinetuningjobsop", - "ListFineTuningJobsStatus": ".listfinetuningjobsop", - "ListLibraryAccessesRequest": ".listlibraryaccessesop", - "ListLibraryAccessesRequestTypedDict": ".listlibraryaccessesop", - "ListLibraryOut": ".listlibraryout", - "ListLibraryOutTypedDict": ".listlibraryout", + "LegacyJobMetadata": ".legacyjobmetadata", + "LegacyJobMetadataTypedDict": ".legacyjobmetadata", + "LibrariesDeleteV1Request": ".libraries_delete_v1op", + "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", + "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", + "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", + "LibrariesDocumentsGetV1RequestTypedDict": 
".libraries_documents_get_v1op", + "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", + "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", + "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", + "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", + "DocumentUpload": ".libraries_documents_upload_v1op", + "DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", + "LibrariesGetV1Request": ".libraries_get_v1op", + "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", + "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", + "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", + "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", + "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", + "LibrariesShareListV1Request": ".libraries_share_list_v1op", + "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", + "LibrariesUpdateV1Request": ".libraries_update_v1op", + "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", + "Library": ".library", + "LibraryTypedDict": ".library", + "ListBatchJobsResponse": ".listbatchjobsresponse", + "ListBatchJobsResponseTypedDict": ".listbatchjobsresponse", + "ListDocumentsResponse": ".listdocumentsresponse", + "ListDocumentsResponseTypedDict": ".listdocumentsresponse", + "ListFilesResponse": ".listfilesresponse", + "ListFilesResponseTypedDict": ".listfilesresponse", + "ListFineTuningJobsResponse": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseDataTypedDict": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseTypedDict": ".listfinetuningjobsresponse", + "UnknownListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + "ListLibrariesResponse": ".listlibrariesresponse", + "ListLibrariesResponseTypedDict": ".listlibrariesresponse", "ListSharingOut": ".listsharingout", "ListSharingOutTypedDict": ".listsharingout", "MessageEntries": ".messageentries", @@ -2022,51 +2142,45 @@ "MessageInputEntry": ".messageinputentry", "MessageInputEntryContent": ".messageinputentry", "MessageInputEntryContentTypedDict": ".messageinputentry", - "MessageInputEntryObject": ".messageinputentry", - "MessageInputEntryRole": ".messageinputentry", - "MessageInputEntryType": ".messageinputentry", "MessageInputEntryTypedDict": ".messageinputentry", + "Role": ".messageinputentry", "MessageOutputContentChunks": ".messageoutputcontentchunks", "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", "MessageOutputEntry": ".messageoutputentry", "MessageOutputEntryContent": ".messageoutputentry", "MessageOutputEntryContentTypedDict": ".messageoutputentry", - "MessageOutputEntryObject": ".messageoutputentry", - "MessageOutputEntryRole": ".messageoutputentry", - "MessageOutputEntryType": ".messageoutputentry", "MessageOutputEntryTypedDict": ".messageoutputentry", "MessageOutputEvent": ".messageoutputevent", "MessageOutputEventContent": ".messageoutputevent", "MessageOutputEventContentTypedDict": ".messageoutputevent", - "MessageOutputEventRole": ".messageoutputevent", 
"MessageOutputEventTypedDict": ".messageoutputevent", - "MetricOut": ".metricout", - "MetricOutTypedDict": ".metricout", + "Metric": ".metric", + "MetricTypedDict": ".metric", "MistralPromptMode": ".mistralpromptmode", "ModelCapabilities": ".modelcapabilities", "ModelCapabilitiesTypedDict": ".modelcapabilities", "ModelConversation": ".modelconversation", - "ModelConversationObject": ".modelconversation", "ModelConversationTool": ".modelconversation", "ModelConversationToolTypedDict": ".modelconversation", "ModelConversationTypedDict": ".modelconversation", + "UnknownModelConversationTool": ".modelconversation", "ModelList": ".modellist", "ModelListData": ".modellist", "ModelListDataTypedDict": ".modellist", "ModelListTypedDict": ".modellist", + "UnknownModelListData": ".modellist", "ModerationObject": ".moderationobject", "ModerationObjectTypedDict": ".moderationobject", "ModerationResponse": ".moderationresponse", "ModerationResponseTypedDict": ".moderationresponse", - "NoResponseError": ".no_response_error", "OCRImageObject": ".ocrimageobject", "OCRImageObjectTypedDict": ".ocrimageobject", "OCRPageDimensions": ".ocrpagedimensions", "OCRPageDimensionsTypedDict": ".ocrpagedimensions", "OCRPageObject": ".ocrpageobject", "OCRPageObjectTypedDict": ".ocrpageobject", - "Document": ".ocrrequest", - "DocumentTypedDict": ".ocrrequest", + "DocumentUnion": ".ocrrequest", + "DocumentUnionTypedDict": ".ocrrequest", "OCRRequest": ".ocrrequest", "OCRRequestTypedDict": ".ocrrequest", "TableFormat": ".ocrrequest", @@ -2091,17 +2205,24 @@ "RealtimeTranscriptionErrorDetailMessage": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionErrorDetailMessageTypedDict": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionInputAudioAppend": ".realtimetranscriptioninputaudioappend", + "RealtimeTranscriptionInputAudioAppendTypedDict": ".realtimetranscriptioninputaudioappend", + "RealtimeTranscriptionInputAudioEnd": ".realtimetranscriptioninputaudioend", + "RealtimeTranscriptionInputAudioEndTypedDict": ".realtimetranscriptioninputaudioend", + "RealtimeTranscriptionInputAudioFlush": ".realtimetranscriptioninputaudioflush", + "RealtimeTranscriptionInputAudioFlushTypedDict": ".realtimetranscriptioninputaudioflush", "RealtimeTranscriptionSession": ".realtimetranscriptionsession", "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", "RealtimeTranscriptionSessionUpdatedTypedDict": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdateMessage": ".realtimetranscriptionsessionupdatemessage", + "RealtimeTranscriptionSessionUpdateMessageTypedDict": ".realtimetranscriptionsessionupdatemessage", + "RealtimeTranscriptionSessionUpdatePayload": ".realtimetranscriptionsessionupdatepayload", + "RealtimeTranscriptionSessionUpdatePayloadTypedDict": ".realtimetranscriptionsessionupdatepayload", "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", - "ReprocessDocumentRequest": ".reprocessdocumentop", - "ReprocessDocumentRequestTypedDict": ".reprocessdocumentop", "RequestSource": ".requestsource", "ResponseDoneEvent": ".responsedoneevent", "ResponseDoneEventTypedDict": 
".responsedoneevent", @@ -2112,21 +2233,12 @@ "ResponseFormats": ".responseformats", "ResponseStartedEvent": ".responsestartedevent", "ResponseStartedEventTypedDict": ".responsestartedevent", - "ResponseValidationError": ".responsevalidationerror", - "RestartConversationRequest": ".restartconversationop", - "RestartConversationRequestTypedDict": ".restartconversationop", - "RestartConversationStreamRequest": ".restartconversationstreamop", - "RestartConversationStreamRequestTypedDict": ".restartconversationstreamop", - "RetrieveFileRequest": ".retrievefileop", - "RetrieveFileRequestTypedDict": ".retrievefileop", - "RetrieveFileOut": ".retrievefileout", - "RetrieveFileOutTypedDict": ".retrievefileout", - "ResponseRetrieveModelV1ModelsModelIDGet": ".retrievemodelop", - "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrievemodelop", - "RetrieveModelRequest": ".retrievemodelop", - "RetrieveModelRequestTypedDict": ".retrievemodelop", + "ResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "UnknownResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", "SampleType": ".sampletype", - "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", "ShareEnum": ".shareenum", @@ -2138,10 +2250,6 @@ "SharingOutTypedDict": ".sharingout", "Source": ".source", "SSETypes": ".ssetypes", - "StartFineTuningJobRequest": ".startfinetuningjobop", - "StartFineTuningJobRequestTypedDict": ".startfinetuningjobop", - "StartFineTuningJobResponse": ".startfinetuningjobop", - "StartFineTuningJobResponseTypedDict": ".startfinetuningjobop", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", @@ -2149,21 +2257,24 @@ "SystemMessageContentChunks": ".systemmessagecontentchunks", "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", - "TextChunkType": ".textchunk", "TextChunkTypedDict": ".textchunk", "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", + "ThinkChunkThinking": ".thinkchunk", + "ThinkChunkThinkingTypedDict": ".thinkchunk", "ThinkChunkTypedDict": ".thinkchunk", - "Thinking": ".thinkchunk", - "ThinkingTypedDict": ".thinkchunk", "TimestampGranularity": ".timestampgranularity", "Tool": ".tool", "ToolTypedDict": ".tool", "ToolCall": ".toolcall", "ToolCallTypedDict": ".toolcall", + "Confirmation": ".toolcallconfirmation", + "ToolCallConfirmation": ".toolcallconfirmation", + "ToolCallConfirmationTypedDict": ".toolcallconfirmation", "ToolChoice": ".toolchoice", "ToolChoiceTypedDict": ".toolchoice", "ToolChoiceEnum": ".toolchoiceenum", + "ToolConfiguration": ".toolconfiguration", + "ToolConfigurationTypedDict": ".toolconfiguration", "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", @@ -2175,8 +2286,6 @@ "ToolExecutionEntry": ".toolexecutionentry", "ToolExecutionEntryName": ".toolexecutionentry", "ToolExecutionEntryNameTypedDict": ".toolexecutionentry", - "ToolExecutionEntryObject": ".toolexecutionentry", - "ToolExecutionEntryType": ".toolexecutionentry", "ToolExecutionEntryTypedDict": 
".toolexecutionentry", "ToolExecutionStartedEvent": ".toolexecutionstartedevent", "ToolExecutionStartedEventName": ".toolexecutionstartedevent", @@ -2185,7 +2294,6 @@ "ToolFileChunk": ".toolfilechunk", "ToolFileChunkTool": ".toolfilechunk", "ToolFileChunkToolTypedDict": ".toolfilechunk", - "ToolFileChunkType": ".toolfilechunk", "ToolFileChunkTypedDict": ".toolfilechunk", "ToolMessage": ".toolmessage", "ToolMessageContent": ".toolmessage", @@ -2194,7 +2302,6 @@ "ToolReferenceChunk": ".toolreferencechunk", "ToolReferenceChunkTool": ".toolreferencechunk", "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", - "ToolReferenceChunkType": ".toolreferencechunk", "ToolReferenceChunkTypedDict": ".toolreferencechunk", "ToolTypes": ".tooltypes", "TrainingFile": ".trainingfile", @@ -2202,7 +2309,6 @@ "TranscriptionResponse": ".transcriptionresponse", "TranscriptionResponseTypedDict": ".transcriptionresponse", "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", - "TranscriptionSegmentChunkType": ".transcriptionsegmentchunk", "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", "TranscriptionStreamDone": ".transcriptionstreamdone", "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", @@ -2210,6 +2316,7 @@ "TranscriptionStreamEventsData": ".transcriptionstreamevents", "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", + "UnknownTranscriptionStreamEventsData": ".transcriptionstreamevents", "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", @@ -2217,34 +2324,20 @@ "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", - "UnarchiveFTModelOut": ".unarchiveftmodelout", - "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", - "UnarchiveModelRequest": ".unarchivemodelop", - "UnarchiveModelRequestTypedDict": ".unarchivemodelop", - "UpdateAgentRequest": ".updateagentop", - "UpdateAgentRequestTypedDict": ".updateagentop", - "UpdateAgentVersionRequest": ".updateagentversionop", - "UpdateAgentVersionRequestTypedDict": ".updateagentversionop", - "UpdateDocumentRequest": ".updatedocumentop", - "UpdateDocumentRequestTypedDict": ".updatedocumentop", - "UpdateFTModelIn": ".updateftmodelin", - "UpdateFTModelInTypedDict": ".updateftmodelin", - "UpdateLibraryRequest": ".updatelibraryop", - "UpdateLibraryRequestTypedDict": ".updatelibraryop", - "UpdateModelRequest": ".updatemodelop", - "UpdateModelRequestTypedDict": ".updatemodelop", - "UpdateModelResponse": ".updatemodelop", - "UpdateModelResponseTypedDict": ".updatemodelop", - "UpdateOrCreateLibraryAccessRequest": ".updateorcreatelibraryaccessop", - "UpdateOrCreateLibraryAccessRequestTypedDict": ".updateorcreatelibraryaccessop", - "DocumentUpload": ".uploaddocumentop", - "DocumentUploadTypedDict": ".uploaddocumentop", - "UploadDocumentRequest": ".uploaddocumentop", - "UploadDocumentRequestTypedDict": ".uploaddocumentop", - "MultiPartBodyParams": ".uploadfileop", - "MultiPartBodyParamsTypedDict": ".uploadfileop", - "UploadFileOut": ".uploadfileout", - "UploadFileOutTypedDict": ".uploadfileout", + "UnarchiveModelResponse": ".unarchivemodelresponse", + "UnarchiveModelResponseTypedDict": ".unarchivemodelresponse", + "UpdateAgentRequest": 
".updateagentrequest", + "UpdateAgentRequestTool": ".updateagentrequest", + "UpdateAgentRequestToolTypedDict": ".updateagentrequest", + "UpdateAgentRequestTypedDict": ".updateagentrequest", + "Attributes": ".updatedocumentrequest", + "AttributesTypedDict": ".updatedocumentrequest", + "UpdateDocumentRequest": ".updatedocumentrequest", + "UpdateDocumentRequestTypedDict": ".updatedocumentrequest", + "UpdateLibraryRequest": ".updatelibraryrequest", + "UpdateLibraryRequestTypedDict": ".updatelibraryrequest", + "UpdateModelRequest": ".updatemodelrequest", + "UpdateModelRequestTypedDict": ".updatemodelrequest", "UsageInfo": ".usageinfo", "UsageInfoTypedDict": ".usageinfo", "UserMessage": ".usermessage", @@ -2257,8 +2350,8 @@ "ValidationErrorTypedDict": ".validationerror", "WandbIntegration": ".wandbintegration", "WandbIntegrationTypedDict": ".wandbintegration", - "WandbIntegrationOut": ".wandbintegrationout", - "WandbIntegrationOutTypedDict": ".wandbintegrationout", + "WandbIntegrationResult": ".wandbintegrationresult", + "WandbIntegrationResultTypedDict": ".wandbintegrationresult", "WebSearchPremiumTool": ".websearchpremiumtool", "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", "WebSearchTool": ".websearchtool", @@ -2266,39 +2359,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py index 05ae24cd..686a6eb8 100644 --- a/src/mistralai/client/models/agent.py +++ b/src/mistralai/client/models/agent.py @@ -10,6 +10,7 @@ from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict from datetime import datetime +from functools import partial from mistralai.client.types import ( BaseModel, Nullable, @@ -17,7 +18,11 @@ UNSET, UNSET_SENTINEL, ) -from pydantic import Field, model_serializer +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -25,16 +30,36 @@ AgentToolTypedDict = TypeAliasType( "AgentToolTypedDict", Union[ + FunctionToolTypedDict, 
WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) +class UnknownAgentTool(BaseModel): + r"""A AgentTool variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_AGENT_TOOL_VARIANTS: dict[str, Any] = { + "code_interpreter": CodeInterpreterTool, + "document_library": DocumentLibraryTool, + "function": FunctionTool, + "image_generation": ImageGenerationTool, + "web_search": WebSearchTool, + "web_search_premium": WebSearchPremiumTool, +} + + AgentTool = Annotated[ Union[ CodeInterpreterTool, @@ -43,14 +68,20 @@ ImageGenerationTool, WebSearchTool, WebSearchPremiumTool, + UnknownAgentTool, ], - Field(discriminator="TYPE"), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_AGENT_TOOL_VARIANTS, + unknown_cls=UnknownAgentTool, + union_name="AgentTool", + ) + ), ] -AgentObject = Literal["agent",] - - class AgentTypedDict(TypedDict): model: str name: str @@ -70,7 +101,7 @@ class AgentTypedDict(TypedDict): description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] metadata: NotRequired[Nullable[Dict[str, Any]]] - object: NotRequired[AgentObject] + object: Literal["agent"] version_message: NotRequired[Nullable[str]] @@ -108,51 +139,53 @@ class Agent(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET - object: Optional[AgentObject] = "agent" + object: Annotated[ + Annotated[Optional[Literal["agent"]], AfterValidator(validate_const("agent"))], + pydantic.Field(alias="object"), + ] = "agent" version_message: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "description", - "handoffs", - "metadata", - "object", - "version_message", - ] - nullable_fields = [ - "instructions", - "description", - "handoffs", - "metadata", - "version_message", - ] - null_default_fields = [] - + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "object", + "version_message", + ] + ) + nullable_fields = set( + ["instructions", "description", "handoffs", "metadata", "version_message"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + Agent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agentconversation.py b/src/mistralai/client/models/agentconversation.py index a850d54c..da30c663 100644 --- a/src/mistralai/client/models/agentconversation.py +++ b/src/mistralai/client/models/agentconversation.py @@ 
-10,12 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AgentConversationObject = Literal["conversation",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict AgentConversationAgentVersionTypedDict = TypeAliasType( @@ -39,7 +39,7 @@ class AgentConversationTypedDict(TypedDict): r"""Description of the what the conversation is about.""" metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" - object: NotRequired[AgentConversationObject] + object: Literal["conversation"] agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]] @@ -61,36 +61,45 @@ class AgentConversation(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" - object: Optional[AgentConversationObject] = "conversation" + object: Annotated[ + Annotated[ + Optional[Literal["conversation"]], + AfterValidator(validate_const("conversation")), + ], + pydantic.Field(alias="object"), + ] = "conversation" agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "description", "metadata", "object", "agent_version"] - nullable_fields = ["name", "description", "metadata", "agent_version"] - null_default_fields = [] - + optional_fields = set( + ["name", "description", "metadata", "object", "agent_version"] + ) + nullable_fields = set(["name", "description", "metadata", "agent_version"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AgentConversation.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py index 40bf8497..e2609e3d 100644 --- a/src/mistralai/client/models/agenthandoffdoneevent.py +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -3,9 +3,10 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -27,7 +28,7 @@ class AgentHandoffDoneEvent(BaseModel): next_agent_name: str - TYPE: Annotated[ + type: Annotated[ Annotated[ 
Literal["agent.handoff.done"], AfterValidator(validate_const("agent.handoff.done")), @@ -38,3 +39,25 @@ class AgentHandoffDoneEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + AgentHandoffDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffentry.py b/src/mistralai/client/models/agenthandoffentry.py index b18fe17c..f92ef2cc 100644 --- a/src/mistralai/client/models/agenthandoffentry.py +++ b/src/mistralai/client/models/agenthandoffentry.py @@ -10,15 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffEntryObject = Literal["entry",] - - -AgentHandoffEntryType = Literal["agent.handoff",] +from typing_extensions import Annotated, NotRequired, TypedDict class AgentHandoffEntryTypedDict(TypedDict): @@ -26,8 +23,8 @@ class AgentHandoffEntryTypedDict(TypedDict): previous_agent_name: str next_agent_id: str next_agent_name: str - object: NotRequired[AgentHandoffEntryObject] - type: NotRequired[AgentHandoffEntryType] + object: Literal["entry"] + type: Literal["agent.handoff"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] @@ -42,9 +39,18 @@ class AgentHandoffEntry(BaseModel): next_agent_name: str - object: Optional[AgentHandoffEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[AgentHandoffEntryType] = "agent.handoff" + type: Annotated[ + Annotated[ + Optional[Literal["agent.handoff"]], + AfterValidator(validate_const("agent.handoff")), + ], + pydantic.Field(alias="type"), + ] = "agent.handoff" created_at: Optional[datetime] = None @@ -54,30 +60,31 @@ class AgentHandoffEntry(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set(["object", "type", "created_at", "completed_at", "id"]) + nullable_fields = set(["completed_at"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] 
= val - return m +try: + AgentHandoffEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py index e278aef3..2a402341 100644 --- a/src/mistralai/client/models/agenthandoffstartedevent.py +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -3,9 +3,10 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -27,7 +28,7 @@ class AgentHandoffStartedEvent(BaseModel): previous_agent_name: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["agent.handoff.started"], AfterValidator(validate_const("agent.handoff.started")), @@ -38,3 +39,25 @@ class AgentHandoffStartedEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + AgentHandoffStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/createorupdateagentaliasop.py b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py similarity index 80% rename from src/mistralai/client/models/createorupdateagentaliasop.py rename to src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py index cde1dd05..04761ae7 100644 --- a/src/mistralai/client/models/createorupdateagentaliasop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: a79cf28bda01 +# @generated-id: 23a832f8f175 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,13 +7,13 @@ from typing_extensions import Annotated, TypedDict -class CreateOrUpdateAgentAliasRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): agent_id: str alias: str version: int -class CreateOrUpdateAgentAliasRequest(BaseModel): +class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deleteagentaliasop.py b/src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py similarity index 78% rename from src/mistralai/client/models/deleteagentaliasop.py rename to src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py index c52d099e..291a9802 100644 --- a/src/mistralai/client/models/deleteagentaliasop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: e4d0d7f75b24 +# @generated-id: 9c9947e768d3 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteAgentAliasRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsDeleteAliasRequestTypedDict(TypedDict): agent_id: str alias: str -class DeleteAgentAliasRequest(BaseModel): +class AgentsAPIV1AgentsDeleteAliasRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listagentaliasesop.py b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py similarity index 74% rename from src/mistralai/client/models/listagentaliasesop.py rename to src/mistralai/client/models/agents_api_v1_agents_deleteop.py index 83c6d176..5e41fdcd 100644 --- a/src/mistralai/client/models/listagentaliasesop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: ff038766a902 +# @generated-id: 95adb6768908 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class ListAgentAliasesRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): agent_id: str -class ListAgentAliasesRequest(BaseModel): +class AgentsAPIV1AgentsDeleteRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getagentversionop.py b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py similarity index 78% rename from src/mistralai/client/models/getagentversionop.py rename to src/mistralai/client/models/agents_api_v1_agents_get_versionop.py index 77b8a266..941863d0 100644 --- a/src/mistralai/client/models/getagentversionop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: a0db5a6aab1f +# @generated-id: ef9914284afb from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetAgentVersionRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): agent_id: str version: str -class GetAgentVersionRequest(BaseModel): +class AgentsAPIV1AgentsGetVersionRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/agents_api_v1_agents_getop.py new file mode 100644 index 00000000..dd17580d --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_getop.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f5918c34f1c7 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsAPIV1AgentsGetAgentVersionTypedDict = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersionTypedDict", Union[int, str] +) + + +AgentsAPIV1AgentsGetAgentVersion = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersion", Union[int, str] +) + + +class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): + agent_id: str + agent_version: NotRequired[Nullable[AgentsAPIV1AgentsGetAgentVersionTypedDict]] + + +class AgentsAPIV1AgentsGetRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_version: Annotated[ + OptionalNullable[AgentsAPIV1AgentsGetAgentVersion], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["agent_version"]) + nullable_fields = set(["agent_version"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/deleteagentop.py b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py similarity index 71% rename from src/mistralai/client/models/deleteagentop.py rename to src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py index 8b14bca7..bb1da602 100644 --- a/src/mistralai/client/models/deleteagentop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 089fb7f87aea +# @generated-id: a04815e6c798 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class DeleteAgentRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): agent_id: str -class DeleteAgentRequest(BaseModel): +class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listagentversionsop.py b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py similarity index 56% rename from src/mistralai/client/models/listagentversionsop.py rename to src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py index 613d3d85..54b62e90 100644 --- a/src/mistralai/client/models/listagentversionsop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py @@ -1,14 +1,15 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ccc5fb48e78f +# @generated-id: 19e3310c3907 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict -class ListAgentVersionsRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): agent_id: str page: NotRequired[int] r"""Page number (0-indexed)""" @@ -16,7 +17,7 @@ class ListAgentVersionsRequestTypedDict(TypedDict): r"""Number of versions per page""" -class ListAgentVersionsRequest(BaseModel): +class AgentsAPIV1AgentsListVersionsRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -32,3 +33,19 @@ class ListAgentVersionsRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 20 r"""Number of versions per page""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page", "page_size"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/listagentsop.py b/src/mistralai/client/models/agents_api_v1_agents_listop.py similarity index 70% rename from src/mistralai/client/models/listagentsop.py rename to src/mistralai/client/models/agents_api_v1_agents_listop.py index 863fc13a..97b1c7f1 100644 --- a/src/mistralai/client/models/listagentsop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_listop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: a573a873c404 +# @generated-id: 25a6460a6e19 from __future__ import annotations from .requestsource import RequestSource @@ -16,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class ListAgentsRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): page: NotRequired[int] r"""Page number (0-indexed)""" page_size: NotRequired[int] @@ -31,7 +31,7 @@ class ListAgentsRequestTypedDict(TypedDict): metadata: NotRequired[Nullable[Dict[str, Any]]] -class ListAgentsRequest(BaseModel): +class AgentsAPIV1AgentsListRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -78,46 +78,38 @@ class ListAgentsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "deployment_chat", - "sources", - "name", - "search", - "id", - "metadata", - ] - nullable_fields = [ - "deployment_chat", - "sources", - "name", - "search", - "id", - "metadata", - ] - null_default_fields = [] - + optional_fields = set( + [ + "page", + "page_size", + "deployment_chat", + "sources", + "name", + "search", + "id", + "metadata", + ] + ) + nullable_fields = set( + ["deployment_chat", "sources", "name", "search", "id", "metadata"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/updateagentversionop.py b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py similarity index 78% rename from src/mistralai/client/models/updateagentversionop.py rename to src/mistralai/client/models/agents_api_v1_agents_update_versionop.py index 114013bc..5ab821ea 100644 --- a/src/mistralai/client/models/updateagentversionop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 3821dca5b20a +# @generated-id: 63f61b8891bf from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class UpdateAgentVersionRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): agent_id: str version: int -class UpdateAgentVersionRequest(BaseModel): +class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updateagentop.py b/src/mistralai/client/models/agents_api_v1_agents_updateop.py similarity index 62% rename from src/mistralai/client/models/updateagentop.py rename to src/mistralai/client/models/agents_api_v1_agents_updateop.py index 28acc83d..69da5001 100644 --- a/src/mistralai/client/models/updateagentop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_updateop.py @@ -1,24 +1,24 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: ae3a6abea468 +# @generated-id: bb55993c932d from __future__ import annotations -from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict +from .updateagentrequest import UpdateAgentRequest, UpdateAgentRequestTypedDict from mistralai.client.types import BaseModel from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict -class UpdateAgentRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): agent_id: str - agent_update_request: AgentUpdateRequestTypedDict + update_agent_request: UpdateAgentRequestTypedDict -class UpdateAgentRequest(BaseModel): +class AgentsAPIV1AgentsUpdateRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] - agent_update_request: Annotated[ - AgentUpdateRequest, + update_agent_request: Annotated[ + UpdateAgentRequest, FieldMetadata(request=RequestMetadata(media_type="application/json")), ] diff --git a/src/mistralai/client/models/appendconversationstreamop.py b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py similarity index 85% rename from src/mistralai/client/models/appendconversationstreamop.py rename to src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py index 55efca0e..d257dc78 100644 --- a/src/mistralai/client/models/appendconversationstreamop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 1ab08b189e9d +# @generated-id: ec00e0905f15 from __future__ import annotations from .conversationappendstreamrequest import ( @@ -11,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AppendConversationStreamRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation to which we append entries.""" conversation_append_stream_request: ConversationAppendStreamRequestTypedDict -class AppendConversationStreamRequest(BaseModel): +class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/appendconversationop.py b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py similarity index 85% rename from src/mistralai/client/models/appendconversationop.py rename to src/mistralai/client/models/agents_api_v1_conversations_appendop.py index 710b8e1c..61fec083 100644 --- a/src/mistralai/client/models/appendconversationop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 1c47dd1e7c7e +# @generated-id: 39c6125e850c from __future__ import annotations from .conversationappendrequest import ( @@ -11,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AppendConversationRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation to which we append entries.""" conversation_append_request: ConversationAppendRequestTypedDict -class AppendConversationRequest(BaseModel): +class AgentsAPIV1ConversationsAppendRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deleteconversationop.py b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py similarity index 78% rename from src/mistralai/client/models/deleteconversationop.py rename to src/mistralai/client/models/agents_api_v1_conversations_deleteop.py index 39607f40..499645a7 100644 --- a/src/mistralai/client/models/deleteconversationop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 86fefc353db0 +# @generated-id: 0792e6abbdcb from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteConversationRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching metadata.""" -class DeleteConversationRequest(BaseModel): +class AgentsAPIV1ConversationsDeleteRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getconversationop.py b/src/mistralai/client/models/agents_api_v1_conversations_getop.py similarity index 88% rename from src/mistralai/client/models/getconversationop.py rename to src/mistralai/client/models/agents_api_v1_conversations_getop.py index d204d175..504616ab 100644 --- a/src/mistralai/client/models/getconversationop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_getop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 1a622b8337ac +# @generated-id: c530f2fc64d0 from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict @@ -10,12 +10,12 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class GetConversationRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching metadata.""" -class GetConversationRequest(BaseModel): +class AgentsAPIV1ConversationsGetRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getconversationhistoryop.py b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py similarity index 78% rename from src/mistralai/client/models/getconversationhistoryop.py rename to src/mistralai/client/models/agents_api_v1_conversations_historyop.py index c1fbf3de..ef0a4eb0 100644 --- a/src/mistralai/client/models/getconversationhistoryop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: c863a4cbeb34 +# @generated-id: 2f5ca33768aa from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetConversationHistoryRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching entries.""" -class GetConversationHistoryRequest(BaseModel): +class AgentsAPIV1ConversationsHistoryRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listconversationsop.py b/src/mistralai/client/models/agents_api_v1_conversations_listop.py similarity index 59% rename from src/mistralai/client/models/listconversationsop.py rename to src/mistralai/client/models/agents_api_v1_conversations_listop.py index 1c9a347c..8bf66aea 100644 --- a/src/mistralai/client/models/listconversationsop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_listop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: d6007f6c1643 +# @generated-id: 936e36181d36 from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict @@ -17,13 +17,13 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -class ListConversationsRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] metadata: NotRequired[Nullable[Dict[str, Any]]] -class ListConversationsRequest(BaseModel): +class AgentsAPIV1ConversationsListRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -41,41 +41,36 @@ class ListConversationsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["page", "page_size", "metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - + optional_fields = set(["page", "page_size", "metadata"]) + nullable_fields = set(["metadata"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m -ListConversationsResponseTypedDict = TypeAliasType( - "ListConversationsResponseTypedDict", +AgentsAPIV1ConversationsListResponseTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsListResponseTypedDict", Union[AgentConversationTypedDict, ModelConversationTypedDict], ) -ListConversationsResponse = TypeAliasType( - "ListConversationsResponse", Union[AgentConversation, ModelConversation] +AgentsAPIV1ConversationsListResponse = TypeAliasType( + 
"AgentsAPIV1ConversationsListResponse", Union[AgentConversation, ModelConversation] ) diff --git a/src/mistralai/client/models/getconversationmessagesop.py b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py similarity index 78% rename from src/mistralai/client/models/getconversationmessagesop.py rename to src/mistralai/client/models/agents_api_v1_conversations_messagesop.py index 6666198e..19978a19 100644 --- a/src/mistralai/client/models/getconversationmessagesop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: bb8a90ba7c22 +# @generated-id: b5141764a708 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetConversationMessagesRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching messages.""" -class GetConversationMessagesRequest(BaseModel): +class AgentsAPIV1ConversationsMessagesRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/restartconversationstreamop.py b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py similarity index 85% rename from src/mistralai/client/models/restartconversationstreamop.py rename to src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py index 3b2025f5..63c74449 100644 --- a/src/mistralai/client/models/restartconversationstreamop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 16dc9ee5bf22 +# @generated-id: c284a1711148 from __future__ import annotations from .conversationrestartstreamrequest import ( @@ -11,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class RestartConversationStreamRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): conversation_id: str r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict -class RestartConversationStreamRequest(BaseModel): +class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/restartconversationop.py b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py similarity index 85% rename from src/mistralai/client/models/restartconversationop.py rename to src/mistralai/client/models/agents_api_v1_conversations_restartop.py index b09eaed5..3186d5df 100644 --- a/src/mistralai/client/models/restartconversationop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 2f6f3e4bbfd8 +# @generated-id: 3ba234e5a8fc from __future__ import annotations from .conversationrestartrequest import ( @@ -11,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class RestartConversationRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): conversation_id: str r"""ID of the original conversation which is being restarted.""" conversation_restart_request: ConversationRestartRequestTypedDict -class RestartConversationRequest(BaseModel): +class AgentsAPIV1ConversationsRestartRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py index f4a2d646..6955f6ac 100644 --- a/src/mistralai/client/models/agentscompletionrequest.py +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -148,52 +148,44 @@ class AgentsCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + ["max_tokens", "random_seed", "metadata", "tools", "n", "prompt_mode"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py index 732e2402..c2cf3552 100644 --- a/src/mistralai/client/models/agentscompletionstreamrequest.py +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -146,52 +146,44 @@ class AgentsCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + 
"max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + ["max_tokens", "random_seed", "metadata", "tools", "n", "prompt_mode"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/archiveftmodelout.py b/src/mistralai/client/models/archiveftmodelout.py deleted file mode 100644 index 3107116c..00000000 --- a/src/mistralai/client/models/archiveftmodelout.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: bab499599d30 - -from __future__ import annotations -from mistralai.client.types import BaseModel -from mistralai.client.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ArchiveFTModelOutTypedDict(TypedDict): - id: str - object: Literal["model"] - archived: NotRequired[bool] - - -class ArchiveFTModelOut(BaseModel): - id: str - - OBJECT: Annotated[ - Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], - pydantic.Field(alias="object"), - ] = "model" - - archived: Optional[bool] = True diff --git a/src/mistralai/client/models/archivemodelresponse.py b/src/mistralai/client/models/archivemodelresponse.py new file mode 100644 index 00000000..f1116850 --- /dev/null +++ b/src/mistralai/client/models/archivemodelresponse.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2d22c644df64 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ArchiveModelResponseTypedDict(TypedDict): + id: str + object: Literal["model"] + archived: NotRequired[bool] + + +class ArchiveModelResponse(BaseModel): + id: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + archived: Optional[bool] = True + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "archived"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ArchiveModelResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/assistantmessage.py b/src/mistralai/client/models/assistantmessage.py index 5a4a2085..26a778c7 100644 --- a/src/mistralai/client/models/assistantmessage.py +++ b/src/mistralai/client/models/assistantmessage.py @@ -11,9 +11,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict AssistantMessageContentTypedDict = TypeAliasType( @@ -26,18 +29,22 @@ ) -AssistantMessageRole = Literal["assistant",] - - class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @@ -45,34 +52,33 @@ class AssistantMessage(BaseModel): prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: Optional[AssistantMessageRole] = "assistant" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py index a5186827..68866cd2 100644 --- a/src/mistralai/client/models/audiochunk.py +++ b/src/mistralai/client/models/audiochunk.py @@ -18,9 +18,15 @@ class AudioChunkTypedDict(TypedDict): class AudioChunk(BaseModel): input_audio: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["input_audio"], AfterValidator(validate_const("input_audio")) ], pydantic.Field(alias="type"), ] = "input_audio" + + +try: + AudioChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiotranscriptionrequest.py b/src/mistralai/client/models/audiotranscriptionrequest.py index 8c47a83c..fe4c79e3 100644 --- a/src/mistralai/client/models/audiotranscriptionrequest.py +++ b/src/mistralai/client/models/audiotranscriptionrequest.py @@ -58,7 +58,7 @@ class AudioTranscriptionRequest(BaseModel): UNSET ) - STREAM: Annotated[ + stream: Annotated[ Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], pydantic.Field(alias="stream"), FieldMetadata(multipart=True), @@ -75,40 +75,43 @@ class AudioTranscriptionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "diarize", - "context_bias", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - + optional_fields = set( + [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + ) + nullable_fields = set(["file_url", "file_id", "language", "temperature"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + 
m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AudioTranscriptionRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiotranscriptionrequeststream.py b/src/mistralai/client/models/audiotranscriptionrequeststream.py index a080cee2..2d1e9269 100644 --- a/src/mistralai/client/models/audiotranscriptionrequeststream.py +++ b/src/mistralai/client/models/audiotranscriptionrequeststream.py @@ -56,7 +56,7 @@ class AudioTranscriptionRequestStream(BaseModel): UNSET ) - STREAM: Annotated[ + stream: Annotated[ Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], pydantic.Field(alias="stream"), FieldMetadata(multipart=True), @@ -73,40 +73,43 @@ class AudioTranscriptionRequestStream(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "diarize", - "context_bias", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - + optional_fields = set( + [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + ) + nullable_fields = set(["file_url", "file_id", "language", "temperature"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AudioTranscriptionRequestStream.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py index 17a3e5c9..9c9e9a20 100644 --- a/src/mistralai/client/models/basemodelcard.py +++ b/src/mistralai/client/models/basemodelcard.py @@ -60,54 +60,59 @@ class BaseModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["base"], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - + 
optional_fields = set( + [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) + nullable_fields = set( + [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + BaseModelCard.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/batcherror.py b/src/mistralai/client/models/batcherror.py index c1bf722a..8a353cd2 100644 --- a/src/mistralai/client/models/batcherror.py +++ b/src/mistralai/client/models/batcherror.py @@ -2,7 +2,8 @@ # @generated-id: 1563e2a576ec from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -16,3 +17,19 @@ class BatchError(BaseModel): message: str count: Optional[int] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["count"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/batchjobout.py b/src/mistralai/client/models/batchjob.py similarity index 64% rename from src/mistralai/client/models/batchjobout.py rename to src/mistralai/client/models/batchjob.py index 99c2b951..80acac33 100644 --- a/src/mistralai/client/models/batchjobout.py +++ b/src/mistralai/client/models/batchjob.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: cbf1d872a46e +# @generated-id: 85cd28932cc7 from __future__ import annotations from .batcherror import BatchError, BatchErrorTypedDict @@ -19,7 +19,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class BatchJobOutTypedDict(TypedDict): +class BatchJobTypedDict(TypedDict): id: str input_files: List[str] endpoint: str @@ -41,7 +41,7 @@ class BatchJobOutTypedDict(TypedDict): completed_at: NotRequired[Nullable[int]] -class BatchJobOut(BaseModel): +class BatchJob(BaseModel): id: str input_files: List[str] @@ -62,7 +62,7 @@ class BatchJobOut(BaseModel): failed_requests: int - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["batch"]], AfterValidator(validate_const("batch"))], pydantic.Field(alias="object"), ] = "batch" @@ -85,49 +85,54 @@ class BatchJobOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "outputs", - "started_at", - "completed_at", - ] - nullable_fields = [ - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "outputs", - "started_at", - "completed_at", - ] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + ) + nullable_fields = set( + [ + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + BatchJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/batchjobsout.py b/src/mistralai/client/models/batchjobsout.py deleted file mode 100644 index f65fc040..00000000 --- a/src/mistralai/client/models/batchjobsout.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 20b2516e7efa - -from __future__ import annotations -from .batchjobout import BatchJobOut, BatchJobOutTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class BatchJobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[BatchJobOutTypedDict]] - object: Literal["list"] - - -class BatchJobsOut(BaseModel): - total: int - - data: Optional[List[BatchJobOut]] = None - - OBJECT: Annotated[ - Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], - pydantic.Field(alias="object"), - ] = "list" diff --git a/src/mistralai/client/models/batchrequest.py b/src/mistralai/client/models/batchrequest.py index 41c45234..911a9a05 100644 --- a/src/mistralai/client/models/batchrequest.py +++ b/src/mistralai/client/models/batchrequest.py @@ -26,30 +26,25 @@ class BatchRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["custom_id"] - nullable_fields = ["custom_id"] - null_default_fields = [] - + optional_fields = set(["custom_id"]) + nullable_fields = set(["custom_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/cancelfinetuningjobop.py b/src/mistralai/client/models/cancelfinetuningjobop.py deleted file mode 100644 index ddd445bb..00000000 --- a/src/mistralai/client/models/cancelfinetuningjobop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: c9a1b39f0d02 - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class CancelFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to cancel.""" - - -class CancelFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to cancel.""" - - -CancelFineTuningJobResponseTypedDict = TypeAliasType( - "CancelFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -CancelFineTuningJobResponse = Annotated[ - Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], - Field(discriminator="JOB_TYPE"), -] -r"""OK""" diff --git a/src/mistralai/client/models/chatclassificationrequest.py b/src/mistralai/client/models/chatclassificationrequest.py index 8b6d07b9..cf2aa78a 100644 --- a/src/mistralai/client/models/chatclassificationrequest.py +++ b/src/mistralai/client/models/chatclassificationrequest.py @@ -4,18 +4,17 @@ from __future__ import annotations from .inputs import Inputs, InputsTypedDict from mistralai.client.types import BaseModel -import pydantic -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict class ChatClassificationRequestTypedDict(TypedDict): model: str - inputs: InputsTypedDict + input: InputsTypedDict r"""Chat to classify""" class ChatClassificationRequest(BaseModel): model: str - inputs: Annotated[Inputs, pydantic.Field(alias="input")] + input: Inputs r"""Chat to classify""" diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py index 4f7d071b..e871bd92 100644 --- a/src/mistralai/client/models/chatcompletionrequest.py +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -171,56 +171,55 @@ class ChatCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py index ec7d2ae1..b7b2bff1 100644 --- a/src/mistralai/client/models/chatcompletionstreamrequest.py +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -169,56 +169,55 @@ class ChatCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py index a8d021e8..228e7d26 100644 --- a/src/mistralai/client/models/chatmoderationrequest.py +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -86,3 +86,9 @@ class ChatModerationRequest(BaseModel): r"""Chat to classify""" model: str + + +try: + ChatModerationRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/checkpointout.py b/src/mistralai/client/models/checkpoint.py similarity index 81% rename from src/mistralai/client/models/checkpointout.py rename to src/mistralai/client/models/checkpoint.py index 3e8d90e9..c24e433e 100644 --- a/src/mistralai/client/models/checkpointout.py +++ b/src/mistralai/client/models/checkpoint.py @@ -1,14 +1,14 @@ """Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 3866fe32cd7c +# @generated-id: 1a530d3674d8 from __future__ import annotations -from .metricout import MetricOut, MetricOutTypedDict +from .metric import Metric, MetricTypedDict from mistralai.client.types import BaseModel from typing_extensions import TypedDict -class CheckpointOutTypedDict(TypedDict): - metrics: MetricOutTypedDict +class CheckpointTypedDict(TypedDict): + metrics: MetricTypedDict r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" step_number: int r"""The step number that the checkpoint was created at.""" @@ -16,8 +16,8 @@ class CheckpointOutTypedDict(TypedDict): r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" -class CheckpointOut(BaseModel): - metrics: MetricOut +class Checkpoint(BaseModel): + metrics: Metric r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" step_number: int diff --git a/src/mistralai/client/models/classificationrequest.py b/src/mistralai/client/models/classificationrequest.py index 903706c3..25b69413 100644 --- a/src/mistralai/client/models/classificationrequest.py +++ b/src/mistralai/client/models/classificationrequest.py @@ -46,30 +46,31 @@ class ClassificationRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - + optional_fields = set(["metadata"]) + nullable_fields = set(["metadata"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ClassificationRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierdetailedjobout.py b/src/mistralai/client/models/classifierdetailedjobout.py deleted file mode 100644 index bc5c5381..00000000 --- a/src/mistralai/client/models/classifierdetailedjobout.py +++ /dev/null @@ -1,169 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: d8daeb39ef9f - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) -from mistralai.client.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict - - -ClassifierDetailedJobOutStatus = Union[ - Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", - ], - UnrecognizedStr, -] - - -ClassifierDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict - - -ClassifierDetailedJobOutIntegration = WandbIntegrationOut - - -class ClassifierDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - status: ClassifierDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: ClassifierTrainingParametersTypedDict - classifier_targets: List[ClassifierTargetOutTypedDict] - validation_files: NotRequired[Nullable[List[str]]] - object: Literal["job"] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[ClassifierDetailedJobOutIntegrationTypedDict]] - ] - trained_tokens: NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: Literal["classifier"] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class ClassifierDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - - status: ClassifierDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: ClassifierTrainingParameters - - classifier_targets: List[ClassifierTargetOut] - - validation_files: OptionalNullable[List[str]] = UNSET - - OBJECT: Annotated[ - Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], - pydantic.Field(alias="object"), - ] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegration]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - JOB_TYPE: Annotated[ - Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], - pydantic.Field(alias="job_type"), - ] = "classifier" - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/classifierftmodelout.py b/src/mistralai/client/models/classifierfinetunedmodel.py similarity index 56% rename from src/mistralai/client/models/classifierftmodelout.py rename to src/mistralai/client/models/classifierfinetunedmodel.py index 182f4954..fbcf5892 100644 --- a/src/mistralai/client/models/classifierftmodelout.py +++ b/src/mistralai/client/models/classifierfinetunedmodel.py @@ -1,11 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 2903a7123b06 +# @generated-id: 5a9a7a0153c8 from __future__ import annotations -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, +from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, +) +from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, ) from mistralai.client.types import ( BaseModel, @@ -22,7 +25,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class ClassifierFTModelOutTypedDict(TypedDict): +class ClassifierFineTunedModelTypedDict(TypedDict): id: str created: int owned_by: str @@ -30,9 +33,9 @@ class ClassifierFTModelOutTypedDict(TypedDict): root: str root_version: str archived: bool - capabilities: FTModelCapabilitiesOutTypedDict + capabilities: FineTunedModelCapabilitiesTypedDict job: str - classifier_targets: List[ClassifierTargetOutTypedDict] + classifier_targets: List[ClassifierTargetResultTypedDict] object: Literal["model"] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] @@ -41,7 +44,7 @@ class ClassifierFTModelOutTypedDict(TypedDict): model_type: Literal["classifier"] -class ClassifierFTModelOut(BaseModel): +class ClassifierFineTunedModel(BaseModel): id: str created: int @@ -56,13 +59,13 @@ class ClassifierFTModelOut(BaseModel): archived: bool - capabilities: FTModelCapabilitiesOut + capabilities: FineTunedModelCapabilities job: str - classifier_targets: List[ClassifierTargetOut] + classifier_targets: List[ClassifierTargetResult] - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], pydantic.Field(alias="object"), ] = "model" @@ -75,43 +78,40 @@ class ClassifierFTModelOut(BaseModel): aliases: Optional[List[str]] = None 
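All of the regenerated `serialize_model` hooks in this patch apply one shared rule: the `UNSET` sentinel is never emitted, plain optional fields are dropped when they serialize to `None`, and fields that are both optional and nullable keep an explicit `None` only when the caller actually set them (`__pydantic_fields_set__`). The following is a minimal standalone sketch of that rule in plain pydantic v2; the SDK's `UNSET`/`UNSET_SENTINEL` machinery is omitted and the model and field names are purely illustrative, so treat it as an approximation of the generated code, not a copy of it.

    # Standalone sketch of the optional/nullable serialization rule used by the
    # regenerated serialize_model hooks. UNSET handling is omitted; the names
    # below are illustrative only.
    from typing import Optional

    from pydantic import BaseModel, model_serializer


    class ExampleModel(BaseModel):
        id: str
        name: Optional[str] = None         # optional *and* nullable
        description: Optional[str] = None  # plain optional in this sketch

        @model_serializer(mode="wrap")
        def serialize_model(self, handler):
            optional_fields = {"name", "description"}
            nullable_fields = {"name"}
            serialized = handler(self)
            m = {}
            for field_name in type(self).model_fields:
                val = serialized.get(field_name)
                explicitly_set = (
                    field_name in nullable_fields
                    and field_name in self.__pydantic_fields_set__
                )
                # Keep non-None values, keep required fields, and keep an
                # explicit None only for nullable fields the caller set.
                if (
                    val is not None
                    or field_name not in optional_fields
                    or explicitly_set
                ):
                    m[field_name] = val
            return m


    print(ExampleModel(id="a").model_dump())
    # {'id': 'a'}  -- unset optionals are dropped
    print(ExampleModel(id="a", name=None).model_dump())
    # {'id': 'a', 'name': None}  -- an explicit None on a nullable field is kept

The practical effect of the new pattern, if this reading is right, is that request payloads no longer carry `null` for nullable fields the caller never touched, while a deliberate `field=None` still round-trips as JSON `null`.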
- MODEL_TYPE: Annotated[ + model_type: Annotated[ Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], pydantic.Field(alias="model_type"), ] = "classifier" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - + optional_fields = set( + ["object", "name", "description", "max_context_length", "aliases"] + ) + nullable_fields = set(["name", "description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ClassifierFineTunedModel.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierjobout.py b/src/mistralai/client/models/classifierfinetuningjob.py similarity index 63% rename from src/mistralai/client/models/classifierjobout.py rename to src/mistralai/client/models/classifierfinetuningjob.py index 03a5b11c..fb160cf8 100644 --- a/src/mistralai/client/models/classifierjobout.py +++ b/src/mistralai/client/models/classifierfinetuningjob.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: e19e9c4416cc +# @generated-id: a244d5f2afc5 from __future__ import annotations from .classifiertrainingparameters import ( ClassifierTrainingParameters, ClassifierTrainingParametersTypedDict, ) -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) from mistralai.client.types import ( BaseModel, Nullable, @@ -18,13 +21,13 @@ ) from mistralai.client.utils import validate_const import pydantic -from pydantic import model_serializer +from pydantic import ConfigDict, model_serializer from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union +from typing import Any, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypedDict -ClassifierJobOutStatus = Union[ +ClassifierFineTuningJobStatus = Union[ Literal[ "QUEUED", "STARTED", @@ -42,18 +45,33 @@ r"""The current status of the fine-tuning job.""" -ClassifierJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict +ClassifierFineTuningJobIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownClassifierFineTuningJobIntegration(BaseModel): + r"""A ClassifierFineTuningJobIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + +_CLASSIFIER_FINE_TUNING_JOB_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} -ClassifierJobOutIntegration = WandbIntegrationOut +ClassifierFineTuningJobIntegration = WandbIntegrationResult -class ClassifierJobOutTypedDict(TypedDict): + +class ClassifierFineTuningJobTypedDict(TypedDict): id: str r"""The ID of the job.""" auto_start: bool model: str - status: ClassifierJobOutStatus + status: ClassifierFineTuningJobStatus r"""The current status of the fine-tuning job.""" created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" @@ -70,16 +88,18 @@ class ClassifierJobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationTypedDict]]] + integrations: NotRequired[ + Nullable[List[ClassifierFineTuningJobIntegrationTypedDict]] + ] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] job_type: Literal["classifier"] r"""The type of job (`FT` for fine-tuning).""" -class ClassifierJobOut(BaseModel): +class ClassifierFineTuningJob(BaseModel): id: str r"""The ID of the job.""" @@ -87,7 +107,7 @@ class ClassifierJobOut(BaseModel): model: str - status: ClassifierJobOutStatus + status: ClassifierFineTuningJobStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -104,7 +124,7 @@ class ClassifierJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], pydantic.Field(alias="object"), ] = "job" @@ -116,15 +136,15 @@ class ClassifierJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[ClassifierJobOutIntegration]] = UNSET + integrations: OptionalNullable[List[ClassifierFineTuningJobIntegration]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" - metadata: OptionalNullable[JobMetadataOut] = UNSET + metadata: OptionalNullable[JobMetadata] = UNSET - JOB_TYPE: Annotated[ + job_type: Annotated[ Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], pydantic.Field(alias="job_type"), ] = "classifier" @@ -132,45 +152,50 @@ class ClassifierJobOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ClassifierFineTuningJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierfinetuningjobdetails.py b/src/mistralai/client/models/classifierfinetuningjobdetails.py new file mode 100644 index 00000000..5d73f55e --- /dev/null +++ b/src/mistralai/client/models/classifierfinetuningjobdetails.py @@ -0,0 +1,197 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 75c5dee8df2e + +from __future__ import annotations +from .checkpoint import Checkpoint, CheckpointTypedDict +from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, +) +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .event import Event, EventTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierFineTuningJobDetailsStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] + + +ClassifierFineTuningJobDetailsIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownClassifierFineTuningJobDetailsIntegration(BaseModel): + r"""A ClassifierFineTuningJobDetailsIntegration variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CLASSIFIER_FINE_TUNING_JOB_DETAILS_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +ClassifierFineTuningJobDetailsIntegration = WandbIntegrationResult + + +class ClassifierFineTuningJobDetailsTypedDict(TypedDict): + id: str + auto_start: bool + model: str + status: ClassifierFineTuningJobDetailsStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: ClassifierTrainingParametersTypedDict + classifier_targets: List[ClassifierTargetResultTypedDict] + validation_files: NotRequired[Nullable[List[str]]] + object: Literal["job"] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[ClassifierFineTuningJobDetailsIntegrationTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["classifier"] + events: NotRequired[List[EventTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointTypedDict]] + + +class ClassifierFineTuningJobDetails(BaseModel): + id: str + + auto_start: bool + + model: str + + status: ClassifierFineTuningJobDetailsStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: ClassifierTrainingParameters + + classifier_targets: List[ClassifierTargetResult] + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[ClassifierFineTuningJobDetailsIntegration]] = ( + UNSET + ) + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" + + events: Optional[List[Event]] = None + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[Checkpoint]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "events", + "checkpoints", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassifierFineTuningJobDetails.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifiertargetin.py b/src/mistralai/client/models/classifiertarget.py similarity index 55% rename from src/mistralai/client/models/classifiertargetin.py rename to src/mistralai/client/models/classifiertarget.py index b250109b..4d66d789 100644 --- a/src/mistralai/client/models/classifiertargetin.py +++ b/src/mistralai/client/models/classifiertarget.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ed021de1c06c +# @generated-id: 2177d51d9dcf from __future__ import annotations from .ftclassifierlossfunction import FTClassifierLossFunction @@ -15,14 +15,14 @@ from typing_extensions import NotRequired, TypedDict -class ClassifierTargetInTypedDict(TypedDict): +class ClassifierTargetTypedDict(TypedDict): name: str labels: List[str] weight: NotRequired[float] loss_function: NotRequired[Nullable[FTClassifierLossFunction]] -class ClassifierTargetIn(BaseModel): +class ClassifierTarget(BaseModel): name: str labels: List[str] @@ -33,30 +33,25 @@ class ClassifierTargetIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["weight", "loss_function"] - nullable_fields = ["loss_function"] - null_default_fields = [] - + optional_fields = set(["weight", "loss_function"]) + nullable_fields = set(["loss_function"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/classifiertargetout.py b/src/mistralai/client/models/classifiertargetresult.py similarity index 79% rename from src/mistralai/client/models/classifiertargetout.py rename to src/mistralai/client/models/classifiertargetresult.py index 3d41a4d9..8ce7c0ca 100644 --- a/src/mistralai/client/models/classifiertargetout.py +++ b/src/mistralai/client/models/classifiertargetresult.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 5131f55abefe +# @generated-id: 19c343844888 from __future__ import annotations from .ftclassifierlossfunction import FTClassifierLossFunction @@ -8,14 +8,14 @@ from typing_extensions import TypedDict -class ClassifierTargetOutTypedDict(TypedDict): +class ClassifierTargetResultTypedDict(TypedDict): name: str labels: List[str] weight: float loss_function: FTClassifierLossFunction -class ClassifierTargetOut(BaseModel): +class ClassifierTargetResult(BaseModel): name: str labels: List[str] diff --git a/src/mistralai/client/models/classifiertrainingparameters.py b/src/mistralai/client/models/classifiertrainingparameters.py index f360eda5..14fa4926 100644 --- a/src/mistralai/client/models/classifiertrainingparameters.py +++ b/src/mistralai/client/models/classifiertrainingparameters.py @@ -38,43 +38,36 @@ class ClassifierTrainingParameters(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - + optional_fields = set( + [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + ) + nullable_fields = set( + ["training_steps", "weight_decay", "warmup_fraction", "epochs", "seq_len"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/classifiertrainingparametersin.py b/src/mistralai/client/models/classifiertrainingparametersin.py deleted file mode 100644 index 85360a7e..00000000 --- a/src/mistralai/client/models/classifiertrainingparametersin.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 4b33d5cf0345 - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - - -class ClassifierTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py index f69c7a57..ce14265f 100644 --- a/src/mistralai/client/models/codeinterpretertool.py +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -2,23 +2,65 @@ # @generated-id: 950cd8f4ad49 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class CodeInterpreterToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["code_interpreter"] class CodeInterpreterTool(BaseModel): - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[ Literal["code_interpreter"], AfterValidator(validate_const("code_interpreter")), ], pydantic.Field(alias="type"), ] = "code_interpreter" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CodeInterpreterTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionargs.py b/src/mistralai/client/models/completionargs.py index 918832ac..ab5cf5ff 100644 --- a/src/mistralai/client/models/completionargs.py +++ b/src/mistralai/client/models/completionargs.py @@ -58,51 +58,50 @@ class CompletionArgs(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stop", - "presence_penalty", - 
"frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - "tool_choice", - ] - nullable_fields = [ - "stop", - "presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - ] - null_default_fields = [] - + optional_fields = set( + [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + "tool_choice", + ] + ) + nullable_fields = set( + [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/completionchunk.py b/src/mistralai/client/models/completionchunk.py index 67f447d0..5fd6c173 100644 --- a/src/mistralai/client/models/completionchunk.py +++ b/src/mistralai/client/models/completionchunk.py @@ -7,7 +7,8 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import List, Optional from typing_extensions import NotRequired, TypedDict @@ -33,3 +34,19 @@ class CompletionChunk(BaseModel): created: Optional[int] = None usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/completiondetailedjobout.py b/src/mistralai/client/models/completiondetailedjobout.py deleted file mode 100644 index cd3a86ee..00000000 --- a/src/mistralai/client/models/completiondetailedjobout.py +++ /dev/null @@ -1,176 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 9bc38dcfbddf - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) -from mistralai.client.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict - - -CompletionDetailedJobOutStatus = Union[ - Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", - ], - UnrecognizedStr, -] - - -CompletionDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict - - -CompletionDetailedJobOutIntegration = WandbIntegrationOut - - -CompletionDetailedJobOutRepositoryTypedDict = GithubRepositoryOutTypedDict - - -CompletionDetailedJobOutRepository = GithubRepositoryOut - - -class CompletionDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - status: CompletionDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: CompletionTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - object: Literal["job"] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[CompletionDetailedJobOutIntegrationTypedDict]] - ] - trained_tokens: NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: Literal["completion"] - repositories: NotRequired[List[CompletionDetailedJobOutRepositoryTypedDict]] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class CompletionDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - - status: CompletionDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: CompletionTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - - OBJECT: Annotated[ - Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], - pydantic.Field(alias="object"), - ] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[CompletionDetailedJobOutIntegration]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - JOB_TYPE: Annotated[ - Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], - pydantic.Field(alias="job_type"), - ] = "completion" - - repositories: Optional[List[CompletionDetailedJobOutRepository]] = None - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "repositories", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/completionftmodelout.py b/src/mistralai/client/models/completionfinetunedmodel.py similarity index 60% rename from src/mistralai/client/models/completionftmodelout.py rename to src/mistralai/client/models/completionfinetunedmodel.py index 7ecbf54a..54a1c165 100644 --- a/src/mistralai/client/models/completionftmodelout.py +++ b/src/mistralai/client/models/completionfinetunedmodel.py @@ -1,10 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 0f5277833b3e +# @generated-id: f08c10d149f5 from __future__ import annotations -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, +from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, ) from mistralai.client.types import ( BaseModel, @@ -21,7 +21,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class CompletionFTModelOutTypedDict(TypedDict): +class CompletionFineTunedModelTypedDict(TypedDict): id: str created: int owned_by: str @@ -29,7 +29,7 @@ class CompletionFTModelOutTypedDict(TypedDict): root: str root_version: str archived: bool - capabilities: FTModelCapabilitiesOutTypedDict + capabilities: FineTunedModelCapabilitiesTypedDict job: str object: Literal["model"] name: NotRequired[Nullable[str]] @@ -39,7 +39,7 @@ class CompletionFTModelOutTypedDict(TypedDict): model_type: Literal["completion"] -class CompletionFTModelOut(BaseModel): +class CompletionFineTunedModel(BaseModel): id: str created: int @@ -54,11 +54,11 @@ class CompletionFTModelOut(BaseModel): archived: bool - capabilities: FTModelCapabilitiesOut + capabilities: FineTunedModelCapabilities job: str - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], pydantic.Field(alias="object"), ] = "model" @@ -71,43 +71,40 @@ class CompletionFTModelOut(BaseModel): aliases: Optional[List[str]] = None - MODEL_TYPE: Annotated[ + model_type: Annotated[ Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], pydantic.Field(alias="model_type"), ] = "completion" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - + optional_fields = set( + ["object", "name", "description", "max_context_length", "aliases"] + ) + nullable_fields = set(["name", "description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + CompletionFineTunedModel.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionjobout.py b/src/mistralai/client/models/completionfinetuningjob.py similarity index 56% rename from src/mistralai/client/models/completionjobout.py rename to src/mistralai/client/models/completionfinetuningjob.py index 42e5f6c6..1bf0a730 100644 --- a/src/mistralai/client/models/completionjobout.py +++ b/src/mistralai/client/models/completionfinetuningjob.py @@ -1,14 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 712e6c524f9a +# @generated-id: c242237efe9b from __future__ import annotations from .completiontrainingparameters import ( CompletionTrainingParameters, CompletionTrainingParametersTypedDict, ) -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from .githubrepository import GithubRepository, GithubRepositoryTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) from mistralai.client.types import ( BaseModel, Nullable, @@ -19,13 +22,13 @@ ) from mistralai.client.utils import validate_const import pydantic -from pydantic import model_serializer +from pydantic import ConfigDict, model_serializer from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union +from typing import Any, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypedDict -CompletionJobOutStatus = Union[ +CompletionFineTuningJobStatus = Union[ Literal[ "QUEUED", "STARTED", @@ -43,24 +46,54 @@ r"""The current status of the fine-tuning job.""" -CompletionJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict +CompletionFineTuningJobIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownCompletionFineTuningJobIntegration(BaseModel): + r"""A CompletionFineTuningJobIntegration variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +CompletionFineTuningJobIntegration = WandbIntegrationResult -CompletionJobOutIntegration = WandbIntegrationOut +CompletionFineTuningJobRepositoryTypedDict = GithubRepositoryTypedDict -CompletionJobOutRepositoryTypedDict = GithubRepositoryOutTypedDict +class UnknownCompletionFineTuningJobRepository(BaseModel): + r"""A CompletionFineTuningJobRepository variant the SDK doesn't recognize. Preserves the raw payload.""" + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True -CompletionJobOutRepository = GithubRepositoryOut + model_config = ConfigDict(frozen=True) -class CompletionJobOutTypedDict(TypedDict): +_COMPLETION_FINE_TUNING_JOB_REPOSITORY_VARIANTS: dict[str, Any] = { + "github": GithubRepository, +} + + +CompletionFineTuningJobRepository = GithubRepository + + +class CompletionFineTuningJobTypedDict(TypedDict): id: str r"""The ID of the job.""" auto_start: bool model: str - status: CompletionJobOutStatus + status: CompletionFineTuningJobStatus r"""The current status of the fine-tuning job.""" created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" @@ -77,17 +110,19 @@ class CompletionJobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[CompletionJobOutIntegrationTypedDict]]] + integrations: NotRequired[ + Nullable[List[CompletionFineTuningJobIntegrationTypedDict]] + ] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] job_type: Literal["completion"] r"""The type of job (`FT` for fine-tuning).""" - repositories: NotRequired[List[CompletionJobOutRepositoryTypedDict]] + repositories: NotRequired[List[CompletionFineTuningJobRepositoryTypedDict]] -class CompletionJobOut(BaseModel): +class CompletionFineTuningJob(BaseModel): id: str r"""The ID of the job.""" @@ -95,7 +130,7 @@ class CompletionJobOut(BaseModel): model: str - status: CompletionJobOutStatus + status: CompletionFineTuningJobStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -112,7 +147,7 @@ class CompletionJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], pydantic.Field(alias="object"), ] = "job" @@ -124,64 +159,69 @@ class CompletionJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[CompletionJobOutIntegration]] = UNSET + integrations: OptionalNullable[List[CompletionFineTuningJobIntegration]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" - metadata: OptionalNullable[JobMetadataOut] = UNSET + metadata: OptionalNullable[JobMetadata] = UNSET - JOB_TYPE: Annotated[ + job_type: Annotated[ Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], pydantic.Field(alias="job_type"), ] = "completion" r"""The type of job (`FT` for fine-tuning).""" - repositories: Optional[List[CompletionJobOutRepository]] = None + repositories: Optional[List[CompletionFineTuningJobRepository]] = None @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "repositories", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "repositories", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + 
if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + CompletionFineTuningJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionfinetuningjobdetails.py b/src/mistralai/client/models/completionfinetuningjobdetails.py new file mode 100644 index 00000000..cb787021 --- /dev/null +++ b/src/mistralai/client/models/completionfinetuningjobdetails.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e8379265af48 + +from __future__ import annotations +from .checkpoint import Checkpoint, CheckpointTypedDict +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .event import Event, EventTypedDict +from .githubrepository import GithubRepository, GithubRepositoryTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionFineTuningJobDetailsStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] + + +CompletionFineTuningJobDetailsIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownCompletionFineTuningJobDetailsIntegration(BaseModel): + r"""A CompletionFineTuningJobDetailsIntegration variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_DETAILS_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +CompletionFineTuningJobDetailsIntegration = WandbIntegrationResult + + +CompletionFineTuningJobDetailsRepositoryTypedDict = GithubRepositoryTypedDict + + +class UnknownCompletionFineTuningJobDetailsRepository(BaseModel): + r"""A CompletionFineTuningJobDetailsRepository variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_DETAILS_REPOSITORY_VARIANTS: dict[str, Any] = { + "github": GithubRepository, +} + + +CompletionFineTuningJobDetailsRepository = GithubRepository + + +class CompletionFineTuningJobDetailsTypedDict(TypedDict): + id: str + auto_start: bool + model: str + status: CompletionFineTuningJobDetailsStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + object: Literal["job"] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[CompletionFineTuningJobDetailsIntegrationTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["completion"] + repositories: NotRequired[List[CompletionFineTuningJobDetailsRepositoryTypedDict]] + events: NotRequired[List[EventTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointTypedDict]] + + +class CompletionFineTuningJobDetails(BaseModel): + id: str + + auto_start: bool + + model: str + + status: CompletionFineTuningJobDetailsStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[CompletionFineTuningJobDetailsIntegration]] = ( + UNSET + ) + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" + + repositories: Optional[List[CompletionFineTuningJobDetailsRepository]] = None + + events: Optional[List[Event]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[Checkpoint]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "repositories", + "events", + "checkpoints", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CompletionFineTuningJobDetails.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionresponsestreamchoice.py b/src/mistralai/client/models/completionresponsestreamchoice.py index 119a9690..a52ae892 100644 --- a/src/mistralai/client/models/completionresponsestreamchoice.py +++ b/src/mistralai/client/models/completionresponsestreamchoice.py @@ -35,30 +35,14 @@ class CompletionResponseStreamChoice(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m diff --git a/src/mistralai/client/models/completiontrainingparameters.py b/src/mistralai/client/models/completiontrainingparameters.py index 4b846b1b..ca50a7ad 100644 --- a/src/mistralai/client/models/completiontrainingparameters.py +++ b/src/mistralai/client/models/completiontrainingparameters.py @@ -41,45 +41,44 @@ class CompletionTrainingParameters(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - + optional_fields = set( + [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + ) + nullable_fields = set( + [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = 
val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/completiontrainingparametersin.py b/src/mistralai/client/models/completiontrainingparametersin.py deleted file mode 100644 index 20b74ad9..00000000 --- a/src/mistralai/client/models/completiontrainingparametersin.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 0df22b873b5f - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - fim_ratio: NotRequired[Nullable[float]] - - -class CompletionTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - fim_ratio: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py index eff4b8c6..e3de7591 100644 --- a/src/mistralai/client/models/contentchunk.py +++ b/src/mistralai/client/models/contentchunk.py @@ -9,9 +9,12 @@ from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType @@ -29,15 +32,45 @@ ) +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "document_url": DocumentURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, + "file": FileChunk, + "thinking": ThinkChunk, + "input_audio": AudioChunk, +} + + ContentChunk = Annotated[ Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[DocumentURLChunk, Tag("document_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - Annotated[FileChunk, Tag("file")], - Annotated[ThinkChunk, Tag("thinking")], - Annotated[AudioChunk, Tag("input_audio")], + ImageURLChunk, + DocumentURLChunk, + TextChunk, + ReferenceChunk, + FileChunk, + ThinkChunk, + AudioChunk, + UnknownContentChunk, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), ] diff --git a/src/mistralai/client/models/conversationappendrequest.py b/src/mistralai/client/models/conversationappendrequest.py index 0f07475e..386714fd 100644 --- a/src/mistralai/client/models/conversationappendrequest.py +++ b/src/mistralai/client/models/conversationappendrequest.py @@ -4,8 +4,16 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.client.types import BaseModel -from typing import Literal, Optional +from .toolcallconfirmation import ToolCallConfirmation, ToolCallConfirmationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -16,17 +24,18 @@ class ConversationAppendRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict + inputs: NotRequired[ConversationInputsTypedDict] stream: NotRequired[bool] store: NotRequired[bool] r"""Whether to store the results into our servers or not.""" handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + tool_confirmations: NotRequired[Nullable[List[ToolCallConfirmationTypedDict]]] class ConversationAppendRequest(BaseModel): - inputs: ConversationInputs + inputs: Optional[ConversationInputs] = None stream: Optional[bool] = False @@ -37,3 +46,39 @@ class ConversationAppendRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + + tool_confirmations: OptionalNullable[List[ToolCallConfirmation]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "tool_confirmations", + ] + ) + nullable_fields = set(["tool_confirmations"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is 
not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationappendstreamrequest.py b/src/mistralai/client/models/conversationappendstreamrequest.py index a0d46f72..32f6b148 100644 --- a/src/mistralai/client/models/conversationappendstreamrequest.py +++ b/src/mistralai/client/models/conversationappendstreamrequest.py @@ -4,8 +4,16 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.client.types import BaseModel -from typing import Literal, Optional +from .toolcallconfirmation import ToolCallConfirmation, ToolCallConfirmationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -16,17 +24,18 @@ class ConversationAppendStreamRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict + inputs: NotRequired[ConversationInputsTypedDict] stream: NotRequired[bool] store: NotRequired[bool] r"""Whether to store the results into our servers or not.""" handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + tool_confirmations: NotRequired[Nullable[List[ToolCallConfirmationTypedDict]]] class ConversationAppendStreamRequest(BaseModel): - inputs: ConversationInputs + inputs: Optional[ConversationInputs] = None stream: Optional[bool] = True @@ -39,3 +48,39 @@ class ConversationAppendStreamRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + + tool_confirmations: OptionalNullable[List[ToolCallConfirmation]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "tool_confirmations", + ] + ) + nullable_fields = set(["tool_confirmations"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py index f2476038..17812983 100644 --- a/src/mistralai/client/models/conversationevents.py +++ b/src/mistralai/client/models/conversationevents.py @@ -25,9 +25,12 @@ ToolExecutionStartedEvent, ToolExecutionStartedEventTypedDict, ) +from functools import partial from mistralai.client.types import BaseModel -from pydantic import Field -from typing import Union +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -37,17 +40,41 @@ ResponseStartedEventTypedDict, ResponseDoneEventTypedDict, ResponseErrorEventTypedDict, - 
ToolExecutionStartedEventTypedDict, ToolExecutionDeltaEventTypedDict, ToolExecutionDoneEventTypedDict, AgentHandoffStartedEventTypedDict, AgentHandoffDoneEventTypedDict, - FunctionCallEventTypedDict, + ToolExecutionStartedEventTypedDict, MessageOutputEventTypedDict, + FunctionCallEventTypedDict, ], ) +class UnknownConversationEventsData(BaseModel): + r"""A ConversationEventsData variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONVERSATION_EVENTS_DATA_VARIANTS: dict[str, Any] = { + "agent.handoff.done": AgentHandoffDoneEvent, + "agent.handoff.started": AgentHandoffStartedEvent, + "conversation.response.done": ResponseDoneEvent, + "conversation.response.error": ResponseErrorEvent, + "conversation.response.started": ResponseStartedEvent, + "function.call.delta": FunctionCallEvent, + "message.output.delta": MessageOutputEvent, + "tool.execution.delta": ToolExecutionDeltaEvent, + "tool.execution.done": ToolExecutionDoneEvent, + "tool.execution.started": ToolExecutionStartedEvent, +} + + ConversationEventsData = Annotated[ Union[ AgentHandoffDoneEvent, @@ -60,8 +87,17 @@ ToolExecutionDeltaEvent, ToolExecutionDoneEvent, ToolExecutionStartedEvent, + UnknownConversationEventsData, ], - Field(discriminator="TYPE"), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONVERSATION_EVENTS_DATA_VARIANTS, + unknown_cls=UnknownConversationEventsData, + union_name="ConversationEventsData", + ) + ), ] diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py index 92d6cbf9..ceef115b 100644 --- a/src/mistralai/client/models/conversationhistory.py +++ b/src/mistralai/client/models/conversationhistory.py @@ -8,12 +8,13 @@ from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationHistoryObject = Literal["conversation.history",] +from typing_extensions import Annotated, TypeAliasType, TypedDict EntryTypedDict = TypeAliasType( @@ -21,10 +22,10 @@ Union[ FunctionResultEntryTypedDict, MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, ], ) @@ -34,10 +35,10 @@ Union[ FunctionResultEntry, MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, MessageOutputEntry, AgentHandoffEntry, + ToolExecutionEntry, + FunctionCallEntry, ], ) @@ -47,7 +48,7 @@ class ConversationHistoryTypedDict(TypedDict): conversation_id: str entries: List[EntryTypedDict] - object: NotRequired[ConversationHistoryObject] + object: Literal["conversation.history"] class ConversationHistory(BaseModel): @@ -57,4 +58,32 @@ class ConversationHistory(BaseModel): entries: List[Entry] - object: Optional[ConversationHistoryObject] = "conversation.history" + object: Annotated[ + 
Annotated[ + Optional[Literal["conversation.history"]], + AfterValidator(validate_const("conversation.history")), + ], + pydantic.Field(alias="object"), + ] = "conversation.history" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationHistory.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationmessages.py b/src/mistralai/client/models/conversationmessages.py index 1aa294a4..84664b62 100644 --- a/src/mistralai/client/models/conversationmessages.py +++ b/src/mistralai/client/models/conversationmessages.py @@ -3,12 +3,13 @@ from __future__ import annotations from .messageentries import MessageEntries, MessageEntriesTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationMessagesObject = Literal["conversation.messages",] +from typing_extensions import Annotated, TypedDict class ConversationMessagesTypedDict(TypedDict): @@ -16,7 +17,7 @@ class ConversationMessagesTypedDict(TypedDict): conversation_id: str messages: List[MessageEntriesTypedDict] - object: NotRequired[ConversationMessagesObject] + object: Literal["conversation.messages"] class ConversationMessages(BaseModel): @@ -26,4 +27,32 @@ class ConversationMessages(BaseModel): messages: List[MessageEntries] - object: Optional[ConversationMessagesObject] = "conversation.messages" + object: Annotated[ + Annotated[ + Optional[Literal["conversation.messages"]], + AfterValidator(validate_const("conversation.messages")), + ], + pydantic.Field(alias="object"), + ] = "conversation.messages" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationMessages.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py index 2005be82..83d599eb 100644 --- a/src/mistralai/client/models/conversationrequest.py +++ b/src/mistralai/client/models/conversationrequest.py @@ -31,11 +31,11 @@ ConversationRequestToolTypedDict = TypeAliasType( "ConversationRequestToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) @@ -50,7 +50,7 @@ WebSearchTool, WebSearchPremiumTool, ], - Field(discriminator="TYPE"), + Field(discriminator="type"), ] @@ -111,54 +111,53 @@ class ConversationRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - 
"metadata", - "agent_id", - "agent_version", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - null_default_fields = [] - + optional_fields = set( + [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + nullable_fields = set( + [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py index 24598ef3..f6c10969 100644 --- a/src/mistralai/client/models/conversationresponse.py +++ b/src/mistralai/client/models/conversationresponse.py @@ -7,28 +7,29 @@ from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationResponseObject = Literal["conversation.response",] +from typing_extensions import Annotated, TypeAliasType, TypedDict OutputTypedDict = TypeAliasType( "OutputTypedDict", Union[ - ToolExecutionEntryTypedDict, - FunctionCallEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, ], ) Output = TypeAliasType( "Output", - Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], + Union[MessageOutputEntry, AgentHandoffEntry, ToolExecutionEntry, FunctionCallEntry], ) @@ -38,7 +39,7 @@ class ConversationResponseTypedDict(TypedDict): conversation_id: str outputs: List[OutputTypedDict] usage: ConversationUsageInfoTypedDict - object: NotRequired[ConversationResponseObject] + object: Literal["conversation.response"] class ConversationResponse(BaseModel): @@ -50,4 +51,32 @@ class ConversationResponse(BaseModel): usage: ConversationUsageInfo - object: Optional[ConversationResponseObject] = "conversation.response" + object: Annotated[ + Annotated[ + Optional[Literal["conversation.response"]], + 
AfterValidator(validate_const("conversation.response")), + ], + pydantic.Field(alias="object"), + ] = "conversation.response" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py index 35d30993..7ae16aff 100644 --- a/src/mistralai/client/models/conversationrestartrequest.py +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -37,8 +37,8 @@ class ConversationRestartRequestTypedDict(TypedDict): r"""Request to restart a new conversation from a given entry in the conversation.""" - inputs: ConversationInputsTypedDict from_entry_id: str + inputs: NotRequired[ConversationInputsTypedDict] stream: NotRequired[bool] store: NotRequired[bool] r"""Whether to store the results into our servers or not.""" @@ -56,10 +56,10 @@ class ConversationRestartRequestTypedDict(TypedDict): class ConversationRestartRequest(BaseModel): r"""Request to restart a new conversation from a given entry in the conversation.""" - inputs: ConversationInputs - from_entry_id: str + inputs: Optional[ConversationInputs] = None + stream: Optional[bool] = False store: Optional[bool] = True @@ -78,37 +78,35 @@ class ConversationRestartRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "completion_args", - "metadata", - "agent_version", - ] - nullable_fields = ["metadata", "agent_version"] - null_default_fields = [] - + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + ) + nullable_fields = set(["metadata", "agent_version"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py index 0ddfb130..0e247261 100644 --- a/src/mistralai/client/models/conversationrestartstreamrequest.py +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -37,8 +37,8 @@ class ConversationRestartStreamRequestTypedDict(TypedDict): r"""Request to restart a new conversation from a given entry in the conversation.""" - inputs: ConversationInputsTypedDict from_entry_id: str + inputs: NotRequired[ConversationInputsTypedDict] stream: NotRequired[bool] store: 
NotRequired[bool] r"""Whether to store the results into our servers or not.""" @@ -56,10 +56,10 @@ class ConversationRestartStreamRequestTypedDict(TypedDict): class ConversationRestartStreamRequest(BaseModel): r"""Request to restart a new conversation from a given entry in the conversation.""" - inputs: ConversationInputs - from_entry_id: str + inputs: Optional[ConversationInputs] = None + stream: Optional[bool] = True store: Optional[bool] = True @@ -82,37 +82,35 @@ class ConversationRestartStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "completion_args", - "metadata", - "agent_version", - ] - nullable_fields = ["metadata", "agent_version"] - null_default_fields = [] - + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + ) + nullable_fields = set(["metadata", "agent_version"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py index 379a8f28..a20dccae 100644 --- a/src/mistralai/client/models/conversationstreamrequest.py +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -31,11 +31,11 @@ ConversationStreamRequestToolTypedDict = TypeAliasType( "ConversationStreamRequestToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) @@ -50,7 +50,7 @@ WebSearchTool, WebSearchPremiumTool, ], - Field(discriminator="TYPE"), + Field(discriminator="type"), ] @@ -113,54 +113,53 @@ class ConversationStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - null_default_fields = [] - + optional_fields = set( + [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + nullable_fields = set( + [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) serialized = handler(self) - m = {} for 
n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/conversationthinkchunk.py b/src/mistralai/client/models/conversationthinkchunk.py new file mode 100644 index 00000000..e0e172e3 --- /dev/null +++ b/src/mistralai/client/models/conversationthinkchunk.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 77e59cde5c0f + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationThinkChunkThinkingTypedDict = TypeAliasType( + "ConversationThinkChunkThinkingTypedDict", + Union[TextChunkTypedDict, ToolReferenceChunkTypedDict], +) + + +ConversationThinkChunkThinking = TypeAliasType( + "ConversationThinkChunkThinking", Union[TextChunk, ToolReferenceChunk] +) + + +class ConversationThinkChunkTypedDict(TypedDict): + thinking: List[ConversationThinkChunkThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + + +class ConversationThinkChunk(BaseModel): + thinking: List[ConversationThinkChunkThinking] + + type: Annotated[ + Annotated[ + Optional[Literal["thinking"]], AfterValidator(validate_const("thinking")) + ], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = True + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationThinkChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationusageinfo.py b/src/mistralai/client/models/conversationusageinfo.py index 98db0f16..1e80f89e 100644 --- a/src/mistralai/client/models/conversationusageinfo.py +++ b/src/mistralai/client/models/conversationusageinfo.py @@ -35,36 +35,33 @@ class ConversationUsageInfo(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "connector_tokens", - "connectors", - ] - nullable_fields = ["connector_tokens", "connectors"] - null_default_fields = [] - + optional_fields = set( + [ + 
"prompt_tokens", + "completion_tokens", + "total_tokens", + "connector_tokens", + "connectors", + ] + ) + nullable_fields = set(["connector_tokens", "connectors"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/agentcreationrequest.py b/src/mistralai/client/models/createagentrequest.py similarity index 66% rename from src/mistralai/client/models/agentcreationrequest.py rename to src/mistralai/client/models/createagentrequest.py index 898d42a9..54b09880 100644 --- a/src/mistralai/client/models/agentcreationrequest.py +++ b/src/mistralai/client/models/createagentrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 35b7f4933b3e +# @generated-id: 442629bd914b from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -21,20 +21,20 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentCreationRequestToolTypedDict = TypeAliasType( - "AgentCreationRequestToolTypedDict", +CreateAgentRequestToolTypedDict = TypeAliasType( + "CreateAgentRequestToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) -AgentCreationRequestTool = Annotated[ +CreateAgentRequestTool = Annotated[ Union[ CodeInterpreterTool, DocumentLibraryTool, @@ -43,16 +43,16 @@ WebSearchTool, WebSearchPremiumTool, ], - Field(discriminator="TYPE"), + Field(discriminator="type"), ] -class AgentCreationRequestTypedDict(TypedDict): +class CreateAgentRequestTypedDict(TypedDict): model: str name: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentCreationRequestToolTypedDict]] + tools: NotRequired[List[CreateAgentRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -62,7 +62,7 @@ class AgentCreationRequestTypedDict(TypedDict): version_message: NotRequired[Nullable[str]] -class AgentCreationRequest(BaseModel): +class CreateAgentRequest(BaseModel): model: str name: str @@ -70,7 +70,7 @@ class AgentCreationRequest(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentCreationRequestTool]] = None + tools: Optional[List[CreateAgentRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" 
completion_args: Optional[CompletionArgs] = None @@ -86,44 +86,37 @@ class AgentCreationRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "description", - "handoffs", - "metadata", - "version_message", - ] - nullable_fields = [ - "instructions", - "description", - "handoffs", - "metadata", - "version_message", - ] - null_default_fields = [] - + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "version_message", + ] + ) + nullable_fields = set( + ["instructions", "description", "handoffs", "metadata", "version_message"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/batchjobin.py b/src/mistralai/client/models/createbatchjobrequest.py similarity index 76% rename from src/mistralai/client/models/batchjobin.py rename to src/mistralai/client/models/createbatchjobrequest.py index a0c3b914..9a901fef 100644 --- a/src/mistralai/client/models/batchjobin.py +++ b/src/mistralai/client/models/createbatchjobrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 72b25c2038d4 +# @generated-id: 56e24cd24e98 from __future__ import annotations from .apiendpoint import APIEndpoint @@ -16,7 +16,7 @@ from typing_extensions import NotRequired, TypedDict -class BatchJobInTypedDict(TypedDict): +class CreateBatchJobRequestTypedDict(TypedDict): endpoint: APIEndpoint input_files: NotRequired[Nullable[List[str]]] r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
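Besides the rename, note the discriminator fix above: the tool union now discriminates on the lowercase wire-format `type` field instead of the accidental `TYPE` attribute name. A hedged usage sketch (import path taken from this patch; the model name and the assumption that a code-interpreter tool needs no fields beyond its `type` tag are mine, not verified):

```python
# Sketch only: assumes the renamed module from this patch is importable.
from mistralai.client.models.createagentrequest import CreateAgentRequest

req = CreateAgentRequest.model_validate(
    {
        "model": "mistral-medium-latest",  # assumed model name
        "name": "docs-agent",
        # The discriminated union now keys off the "type" field.
        "tools": [{"type": "code_interpreter"}],
    }
)
print(req.model_dump(by_alias=True))
```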
diff --git a/src/mistralai/client/models/batchjobin.py b/src/mistralai/client/models/createbatchjobrequest.py
similarity index 76%
rename from src/mistralai/client/models/batchjobin.py
rename to src/mistralai/client/models/createbatchjobrequest.py
index a0c3b914..9a901fef 100644
--- a/src/mistralai/client/models/batchjobin.py
+++ b/src/mistralai/client/models/createbatchjobrequest.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 72b25c2038d4
+# @generated-id: 56e24cd24e98
 
 from __future__ import annotations
 from .apiendpoint import APIEndpoint
@@ -16,7 +16,7 @@
 from typing_extensions import NotRequired, TypedDict
 
 
-class BatchJobInTypedDict(TypedDict):
+class CreateBatchJobRequestTypedDict(TypedDict):
     endpoint: APIEndpoint
     input_files: NotRequired[Nullable[List[str]]]
     r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field.
 
     An example of such file is the following:
 
     ```json
     {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}}
     {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}}
     ```"""
@@ -31,7 +31,7 @@ class BatchJobInTypedDict(TypedDict):
     r"""The timeout in hours for the batch inference job."""
 
 
-class BatchJobIn(BaseModel):
+class CreateBatchJobRequest(BaseModel):
     endpoint: APIEndpoint
 
     input_files: OptionalNullable[List[str]] = UNSET
@@ -53,37 +53,36 @@
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "input_files",
-            "requests",
-            "model",
-            "agent_id",
-            "metadata",
-            "timeout_hours",
-        ]
-        nullable_fields = ["input_files", "requests", "model", "agent_id", "metadata"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "input_files",
+                "requests",
+                "model",
+                "agent_id",
+                "metadata",
+                "timeout_hours",
+            ]
+        )
+        nullable_fields = set(
+            ["input_files", "requests", "model", "agent_id", "metadata"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
diff --git a/src/mistralai/client/models/uploadfileout.py b/src/mistralai/client/models/createfileresponse.py
similarity index 69%
rename from src/mistralai/client/models/uploadfileout.py
rename to src/mistralai/client/models/createfileresponse.py
index be291efb..76821280 100644
--- a/src/mistralai/client/models/uploadfileout.py
+++ b/src/mistralai/client/models/createfileresponse.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 42466f2bebfb
+# @generated-id: fea5e4832dcc
 
 from __future__ import annotations
 from .filepurpose import FilePurpose
@@ -17,7 +17,7 @@
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class UploadFileOutTypedDict(TypedDict):
+class CreateFileResponseTypedDict(TypedDict):
     id: str
     r"""The unique identifier of the file."""
     object: str
@@ -36,7 +36,7 @@
     signature: NotRequired[Nullable[str]]
 
 
-class UploadFileOut(BaseModel):
+class CreateFileResponse(BaseModel):
     id: str
     r"""The unique identifier of the file."""
 
@@ -66,30 +66,31 @@
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["num_lines", "mimetype", "signature"]
-        nullable_fields = ["num_lines", "mimetype", "signature"]
-        null_default_fields = []
-
+        optional_fields = set(["num_lines", "mimetype", "signature"])
+        nullable_fields = set(["num_lines", "mimetype", "signature"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
+        return m
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-        return m
+
+try:
+    CreateFileResponse.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/createfinetuningjobop.py b/src/mistralai/client/models/createfinetuningjobop.py
deleted file mode 100644
index f55deef5..00000000
--- a/src/mistralai/client/models/createfinetuningjobop.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: fd3c305df250
-
-from __future__ import annotations
-from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict
-from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict
-from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict
-from pydantic import Field
-from typing import Union
-from typing_extensions import Annotated, TypeAliasType
-
-
-ResponseTypedDict = TypeAliasType(
-    "ResponseTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict]
-)
-
-
-Response = Annotated[
-    Union[ClassifierJobOut, CompletionJobOut], Field(discriminator="JOB_TYPE")
-]
-
-
-CreateFineTuningJobResponseTypedDict = TypeAliasType(
-    "CreateFineTuningJobResponseTypedDict",
-    Union[LegacyJobMetadataOutTypedDict, ResponseTypedDict],
-)
-r"""OK"""
-
-
-CreateFineTuningJobResponse = TypeAliasType(
-    "CreateFineTuningJobResponse", Union[LegacyJobMetadataOut, Response]
-)
-r"""OK"""
diff --git a/src/mistralai/client/models/jobin.py b/src/mistralai/client/models/createfinetuningjobrequest.py
similarity index 56%
rename from src/mistralai/client/models/jobin.py
rename to src/mistralai/client/models/createfinetuningjobrequest.py
index b3cb8998..e328d944 100644
--- a/src/mistralai/client/models/jobin.py
+++ b/src/mistralai/client/models/createfinetuningjobrequest.py
@@ -1,15 +1,15 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: f4d176123ccc
+# @generated-id: c60d2a45d66b
 
 from __future__ import annotations
-from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict
-from .classifiertrainingparametersin import (
-    ClassifierTrainingParametersIn,
-    ClassifierTrainingParametersInTypedDict,
+from .classifiertarget import ClassifierTarget, ClassifierTargetTypedDict
+from .classifiertrainingparameters import (
+    ClassifierTrainingParameters,
+    ClassifierTrainingParametersTypedDict,
 )
-from .completiontrainingparametersin import (
-    CompletionTrainingParametersIn,
-    CompletionTrainingParametersInTypedDict,
+from .completiontrainingparameters import (
+    CompletionTrainingParameters,
+    CompletionTrainingParametersTypedDict,
 )
 from .finetuneablemodeltype import FineTuneableModelType
 from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict
@@ -27,33 +27,30 @@
 from typing_extensions import NotRequired, TypeAliasType, TypedDict
 
 
-JobInIntegrationTypedDict = WandbIntegrationTypedDict
+CreateFineTuningJobRequestIntegrationTypedDict = WandbIntegrationTypedDict
 
 
-JobInIntegration = WandbIntegration
+CreateFineTuningJobRequestIntegration = WandbIntegration
 
 
 HyperparametersTypedDict = TypeAliasType(
     "HyperparametersTypedDict",
-    Union[
-        ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict
-    ],
+    Union[ClassifierTrainingParametersTypedDict, CompletionTrainingParametersTypedDict],
 )
 
 
 Hyperparameters = TypeAliasType(
-    "Hyperparameters",
-    Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn],
+    "Hyperparameters", Union[ClassifierTrainingParameters, CompletionTrainingParameters]
 )
 
 
-JobInRepositoryTypedDict = GithubRepositoryInTypedDict
+CreateFineTuningJobRequestRepositoryTypedDict = GithubRepositoryInTypedDict
 
 
-JobInRepository = GithubRepositoryIn
+CreateFineTuningJobRequestRepository = GithubRepositoryIn
 
 
-class JobInTypedDict(TypedDict):
+class CreateFineTuningJobRequestTypedDict(TypedDict):
     model: str
     hyperparameters: HyperparametersTypedDict
     training_files: NotRequired[List[TrainingFileTypedDict]]
@@ -61,17 +58,21 @@
     r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files."""
     suffix: NotRequired[Nullable[str]]
     r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`"""
-    integrations: NotRequired[Nullable[List[JobInIntegrationTypedDict]]]
+    integrations: NotRequired[
+        Nullable[List[CreateFineTuningJobRequestIntegrationTypedDict]]
+    ]
     r"""A list of integrations to enable for your fine-tuning job."""
     auto_start: NotRequired[bool]
     r"""This field will be required in a future release."""
     invalid_sample_skip_percentage: NotRequired[float]
     job_type: NotRequired[Nullable[FineTuneableModelType]]
-    repositories: NotRequired[Nullable[List[JobInRepositoryTypedDict]]]
-    classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]]
+    repositories: NotRequired[
+        Nullable[List[CreateFineTuningJobRequestRepositoryTypedDict]]
+    ]
+    classifier_targets: NotRequired[Nullable[List[ClassifierTargetTypedDict]]]
 
 
-class JobIn(BaseModel):
+class CreateFineTuningJobRequest(BaseModel):
     model: str
 
     hyperparameters: Hyperparameters
@@ -84,7 +85,7 @@
     suffix: OptionalNullable[str] = UNSET
     r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`"""
 
-    integrations: OptionalNullable[List[JobInIntegration]] = UNSET
+    integrations: OptionalNullable[List[CreateFineTuningJobRequestIntegration]] = UNSET
     r"""A list of integrations to enable for your fine-tuning job."""
 
     auto_start: Optional[bool] = None
@@ -94,53 +95,52 @@
 
     job_type: OptionalNullable[FineTuneableModelType] = UNSET
 
-    repositories: OptionalNullable[List[JobInRepository]] = UNSET
+    repositories: OptionalNullable[List[CreateFineTuningJobRequestRepository]] = UNSET
 
-    classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET
+    classifier_targets: OptionalNullable[List[ClassifierTarget]] = UNSET
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "training_files",
-            "validation_files",
-            "suffix",
-            "integrations",
-            "auto_start",
-            "invalid_sample_skip_percentage",
-            "job_type",
-            "repositories",
-            "classifier_targets",
-        ]
-        nullable_fields = [
-            "validation_files",
-            "suffix",
-            "integrations",
-            "job_type",
-            "repositories",
-            "classifier_targets",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "training_files",
+                "validation_files",
+                "suffix",
+                "integrations",
+                "auto_start",
+                "invalid_sample_skip_percentage",
+                "job_type",
+                "repositories",
+                "classifier_targets",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "validation_files",
+                "suffix",
+                "integrations",
+                "job_type",
+                "repositories",
+                "classifier_targets",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
diff --git a/src/mistralai/client/models/libraryin.py b/src/mistralai/client/models/createlibraryrequest.py
similarity index 50%
rename from src/mistralai/client/models/libraryin.py
rename to src/mistralai/client/models/createlibraryrequest.py
index 1a71d410..58874e01 100644
--- a/src/mistralai/client/models/libraryin.py
+++ b/src/mistralai/client/models/createlibraryrequest.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 6147d5df71d9
+# @generated-id: 1c489bec2f53
 
 from __future__ import annotations
 from mistralai.client.types import (
@@ -13,13 +13,13 @@
 from typing_extensions import NotRequired, TypedDict
 
 
-class LibraryInTypedDict(TypedDict):
+class CreateLibraryRequestTypedDict(TypedDict):
     name: str
     description: NotRequired[Nullable[str]]
     chunk_size: NotRequired[Nullable[int]]
 
 
-class LibraryIn(BaseModel):
+class CreateLibraryRequest(BaseModel):
     name: str
 
     description: OptionalNullable[str] = UNSET
@@ -28,30 +28,25 @@ class LibraryIn(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["description", "chunk_size"]
-        nullable_fields = ["description", "chunk_size"]
-        null_default_fields = []
-
+        optional_fields = set(["description", "chunk_size"])
+        nullable_fields = set(["description", "chunk_size"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
diff --git a/src/mistralai/client/models/deletemodelop.py b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py
similarity index 76%
rename from src/mistralai/client/models/deletemodelop.py
rename to src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py
index 55c4b242..199614f5 100644
--- a/src/mistralai/client/models/deletemodelop.py
+++ b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 2c494d99a44d
+# @generated-id: 767aba526e43
 
 from __future__ import annotations
 from mistralai.client.types import BaseModel
@@ -7,12 +7,12 @@
 from typing_extensions import Annotated, TypedDict
 
 
-class DeleteModelRequestTypedDict(TypedDict):
+class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict):
     model_id: str
     r"""The ID of the model to delete."""
 
 
-class DeleteModelRequest(BaseModel):
+class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel):
     model_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
diff --git a/src/mistralai/client/models/deletefileout.py b/src/mistralai/client/models/deletefileresponse.py
similarity index 82%
rename from src/mistralai/client/models/deletefileout.py
rename to src/mistralai/client/models/deletefileresponse.py
index c721f32c..ffd0e0d0 100644
--- a/src/mistralai/client/models/deletefileout.py
+++ b/src/mistralai/client/models/deletefileresponse.py
@@ -1,12 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 5578701e7327
+# @generated-id: 3ee464763a32
 
 from __future__ import annotations
 from mistralai.client.types import BaseModel
 from typing_extensions import TypedDict
 
 
-class DeleteFileOutTypedDict(TypedDict):
+class DeleteFileResponseTypedDict(TypedDict):
     id: str
     r"""The ID of the deleted file."""
     object: str
@@ -15,7 +15,7 @@ class DeleteFileOutTypedDict(TypedDict):
     r"""The deletion status."""
 
 
-class DeleteFileOut(BaseModel):
+class DeleteFileResponse(BaseModel):
     id: str
     r"""The ID of the deleted file."""
diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelout.py
index bf22ed17..fa0c20a4 100644
--- a/src/mistralai/client/models/deletemodelout.py
+++ b/src/mistralai/client/models/deletemodelout.py
@@ -2,7 +2,8 @@
 # @generated-id: ef6a1671c739
 
 from __future__ import annotations
-from mistralai.client.types import BaseModel
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
 from typing import Optional
 from typing_extensions import NotRequired, TypedDict
@@ -25,3 +26,19 @@ class DeleteModelOut(BaseModel):
 
     deleted: Optional[bool] = True
     r"""The deletion status"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["object", "deleted"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py
index fbb8231a..d9fa230e 100644
--- a/src/mistralai/client/models/deltamessage.py
+++ b/src/mistralai/client/models/deltamessage.py
@@ -41,30 +41,25 @@ class DeltaMessage(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["role", "content", "tool_calls"]
-        nullable_fields = ["role", "content", "tool_calls"]
-        null_default_fields = []
-
+        optional_fields = set(["role", "content", "tool_calls"])
+        nullable_fields = set(["role", "content", "tool_calls"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
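The `DeltaMessage` hunk above is a convenient place to see the explicit-null behavior end to end, since all three of its fields are optional and nullable. A hedged sketch (import path taken from this patch; the printed results are what the new serializer should produce, not captured output):

```python
# Sketch only: assumes the mistralai.client package from this patch.
from mistralai.client.models.deltamessage import DeltaMessage

# Fields left at UNSET are omitted from the payload entirely...
print(DeltaMessage().model_dump())              # expected: {}

# ...while an explicit None is preserved, because the field is nullable and
# present in __pydantic_fields_set__.
print(DeltaMessage(content=None).model_dump())  # expected: {'content': None}
```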
diff --git a/src/mistralai/client/models/documentout.py b/src/mistralai/client/models/document.py
similarity index 60%
rename from src/mistralai/client/models/documentout.py
rename to src/mistralai/client/models/document.py
index 3b1a5713..31eebbd1 100644
--- a/src/mistralai/client/models/documentout.py
+++ b/src/mistralai/client/models/document.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 7a85b9dca506
+# @generated-id: fbbf7428328c
 
 from __future__ import annotations
 from datetime import datetime
@@ -15,7 +15,7 @@
 from typing_extensions import NotRequired, TypedDict
 
 
-class DocumentOutTypedDict(TypedDict):
+class DocumentTypedDict(TypedDict):
     id: str
     library_id: str
     hash: Nullable[str]
@@ -24,9 +24,9 @@
     size: Nullable[int]
     name: str
     created_at: datetime
-    processing_status: str
     uploaded_by_id: Nullable[str]
     uploaded_by_type: str
+    processing_status: str
     tokens_processing_total: int
     summary: NotRequired[Nullable[str]]
     last_processed_at: NotRequired[Nullable[datetime]]
@@ -37,7 +37,7 @@
     attributes: NotRequired[Nullable[Dict[str, Any]]]
 
 
-class DocumentOut(BaseModel):
+class Document(BaseModel):
     id: str
 
     library_id: str
@@ -54,12 +54,12 @@
 
     created_at: datetime
 
-    processing_status: str
-
     uploaded_by_id: Nullable[str]
 
     uploaded_by_type: str
 
+    processing_status: str
+
     tokens_processing_total: int
 
     summary: OptionalNullable[str] = UNSET
@@ -78,51 +78,50 @@
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "summary",
-            "last_processed_at",
-            "number_of_pages",
-            "tokens_processing_main_content",
-            "tokens_processing_summary",
-            "url",
-            "attributes",
-        ]
-        nullable_fields = [
-            "hash",
-            "mime_type",
-            "extension",
-            "size",
-            "summary",
-            "last_processed_at",
-            "number_of_pages",
-            "uploaded_by_id",
-            "tokens_processing_main_content",
-            "tokens_processing_summary",
-            "url",
-            "attributes",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "summary",
+                "last_processed_at",
+                "number_of_pages",
+                "tokens_processing_main_content",
+                "tokens_processing_summary",
+                "url",
+                "attributes",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "hash",
+                "mime_type",
+                "extension",
+                "size",
+                "summary",
+                "last_processed_at",
+                "number_of_pages",
+                "uploaded_by_id",
+                "tokens_processing_main_content",
+                "tokens_processing_summary",
+                "url",
+                "attributes",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py
index ff0f7393..642c3202 100644
--- a/src/mistralai/client/models/documentlibrarytool.py
+++ b/src/mistralai/client/models/documentlibrarytool.py
@@ -2,17 +2,26 @@
 # @generated-id: 3eb3c218f457
 
 from __future__ import annotations
-from mistralai.client.types import BaseModel
+from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict
+from mistralai.client.types import (
+    BaseModel,
+    Nullable,
+    OptionalNullable,
+    UNSET,
+    UNSET_SENTINEL,
+)
 from mistralai.client.utils import validate_const
 import pydantic
+from pydantic import model_serializer
 from pydantic.functional_validators import AfterValidator
 from typing import List, Literal
-from typing_extensions import Annotated, TypedDict
+from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 class DocumentLibraryToolTypedDict(TypedDict):
     library_ids: List[str]
     r"""Ids of the library in which to search."""
+    tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]]
     type: Literal["document_library"]
 
 
@@ -20,10 +29,43 @@ class DocumentLibraryTool(BaseModel):
     library_ids: List[str]
     r"""Ids of the library in which to search."""
 
-    TYPE: Annotated[
+    tool_configuration: OptionalNullable[ToolConfiguration] = UNSET
+
+    type: Annotated[
         Annotated[
             Literal["document_library"],
             AfterValidator(validate_const("document_library")),
         ],
         pydantic.Field(alias="type"),
     ] = "document_library"
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["tool_configuration"])
+        nullable_fields = set(["tool_configuration"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
+
+        return m
+
+
+try:
+    DocumentLibraryTool.model_rebuild()
+except NameError:
+    pass
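With the `TYPE` → `type` alias fix above, the constant now round-trips under its real attribute name, and the unset `tool_configuration` is dropped by the new serializer. A hedged sketch (the library id is made up):

```python
# Sketch only: assumes the mistralai.client package from this patch.
from mistralai.client.models.documentlibrarytool import DocumentLibraryTool

tool = DocumentLibraryTool(library_ids=["lib_123"])  # hypothetical id
print(tool.model_dump(by_alias=True))
# expected: {'library_ids': ['lib_123'], 'type': 'document_library'}
# tool_configuration stays UNSET, so it is omitted from the payload.
```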
diff --git a/src/mistralai/client/models/documenturlchunk.py b/src/mistralai/client/models/documenturlchunk.py
index 304cde2b..43444d98 100644
--- a/src/mistralai/client/models/documenturlchunk.py
+++ b/src/mistralai/client/models/documenturlchunk.py
@@ -9,55 +9,62 @@
     UNSET,
     UNSET_SENTINEL,
 )
+from mistralai.client.utils import validate_const
+import pydantic
 from pydantic import model_serializer
+from pydantic.functional_validators import AfterValidator
 from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-DocumentURLChunkType = Literal["document_url",]
+from typing_extensions import Annotated, NotRequired, TypedDict
 
 
 class DocumentURLChunkTypedDict(TypedDict):
     document_url: str
+    type: Literal["document_url"]
     document_name: NotRequired[Nullable[str]]
     r"""The filename of the document"""
-    type: NotRequired[DocumentURLChunkType]
 
 
 class DocumentURLChunk(BaseModel):
     document_url: str
 
+    type: Annotated[
+        Annotated[
+            Optional[Literal["document_url"]],
+            AfterValidator(validate_const("document_url")),
+        ],
+        pydantic.Field(alias="type"),
+    ] = "document_url"
+
     document_name: OptionalNullable[str] = UNSET
     r"""The filename of the document"""
 
-    type: Optional[DocumentURLChunkType] = "document_url"
-
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["document_name", "type"]
-        nullable_fields = ["document_name"]
-        null_default_fields = []
-
+        optional_fields = set(["type", "document_name"])
+        nullable_fields = set(["document_name"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
+        return m
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-        return m
+
+try:
+    DocumentURLChunk.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py
index f4537ffa..15950590 100644
--- a/src/mistralai/client/models/embeddingrequest.py
+++ b/src/mistralai/client/models/embeddingrequest.py
@@ -57,35 +57,33 @@ class EmbeddingRequest(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "metadata",
-            "output_dimension",
-            "output_dtype",
-            "encoding_format",
-        ]
-        nullable_fields = ["metadata", "output_dimension"]
-        null_default_fields = []
-
+        optional_fields = set(
+            ["metadata", "output_dimension", "output_dtype", "encoding_format"]
+        )
+        nullable_fields = set(["metadata", "output_dimension"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
+        return m
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-        return m
+
+try:
+    EmbeddingRequest.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/embeddingresponsedata.py b/src/mistralai/client/models/embeddingresponsedata.py
index a689b290..098cfae0 100644
--- a/src/mistralai/client/models/embeddingresponsedata.py
+++ b/src/mistralai/client/models/embeddingresponsedata.py
@@ -2,7 +2,8 @@
 # @generated-id: 6d6ead6f3803
 
 from __future__ import annotations
-from mistralai.client.types import BaseModel
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
 from typing import List, Optional
 from typing_extensions import NotRequired, TypedDict
@@ -19,3 +20,19 @@ class EmbeddingResponseData(BaseModel):
 
     embedding: Optional[List[float]] = None
 
     index: Optional[int] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["object", "embedding", "index"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/eventout.py b/src/mistralai/client/models/event.py
similarity index 56%
rename from src/mistralai/client/models/eventout.py
rename to src/mistralai/client/models/event.py
index a0247555..c40ae2b1 100644
--- a/src/mistralai/client/models/eventout.py
+++ b/src/mistralai/client/models/event.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: da8ad645a9cb
+# @generated-id: e5a68ac2dd57
 
 from __future__ import annotations
 from mistralai.client.types import (
@@ -14,7 +14,7 @@
 from typing_extensions import NotRequired, TypedDict
 
 
-class EventOutTypedDict(TypedDict):
+class EventTypedDict(TypedDict):
     name: str
     r"""The name of the event."""
     created_at: int
@@ -22,7 +22,7 @@ class EventOutTypedDict(TypedDict):
     data: NotRequired[Nullable[Dict[str, Any]]]
 
 
-class EventOut(BaseModel):
+class Event(BaseModel):
     name: str
     r"""The name of the event."""
 
@@ -33,30 +33,25 @@ class EventOut(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["data"]
-        nullable_fields = ["data"]
-        null_default_fields = []
-
+        optional_fields = set(["data"])
+        nullable_fields = set(["data"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
diff --git a/src/mistralai/client/models/file.py b/src/mistralai/client/models/file.py
index dbbc00b5..1b0ea1d4 100644
--- a/src/mistralai/client/models/file.py
+++ b/src/mistralai/client/models/file.py
@@ -3,9 +3,10 @@
 from __future__ import annotations
 import io
-from mistralai.client.types import BaseModel
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
 from mistralai.client.utils import FieldMetadata, MultipartFormMetadata
 import pydantic
+from pydantic import model_serializer
 from typing import IO, Optional, Union
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -32,3 +33,19 @@ class File(BaseModel):
         pydantic.Field(alias="Content-Type"),
         FieldMetadata(multipart=True),
     ] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["contentType"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/filechunk.py b/src/mistralai/client/models/filechunk.py
index 43ef22f8..5c8d2646 100644
--- a/src/mistralai/client/models/filechunk.py
+++ b/src/mistralai/client/models/filechunk.py
@@ -2,9 +2,10 @@
 # @generated-id: ff3c2d33ab1e
 
 from __future__ import annotations
-from mistralai.client.types import BaseModel
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
 from mistralai.client.utils import validate_const
 import pydantic
+from pydantic import model_serializer
 from pydantic.functional_validators import AfterValidator
 from typing import Literal, Optional
 from typing_extensions import Annotated, TypedDict
@@ -18,7 +19,29 @@ class FileChunkTypedDict(TypedDict):
 
 class FileChunk(BaseModel):
     file_id: str
 
-    TYPE: Annotated[
+    type: Annotated[
         Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))],
         pydantic.Field(alias="type"),
     ] = "file"
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["type"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
+
+
+try:
+    FileChunk.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/downloadfileop.py b/src/mistralai/client/models/files_api_routes_delete_fileop.py
similarity index 74%
rename from src/mistralai/client/models/downloadfileop.py
rename to src/mistralai/client/models/files_api_routes_delete_fileop.py
index fcdc01d6..eaba274b 100644
--- a/src/mistralai/client/models/downloadfileop.py
+++ b/src/mistralai/client/models/files_api_routes_delete_fileop.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 4d051f08057d
+# @generated-id: 2f385cc6138f
 
 from __future__ import annotations
 from mistralai.client.types import BaseModel
@@ -7,11 +7,11 @@
 from typing_extensions import Annotated, TypedDict
 
 
-class DownloadFileRequestTypedDict(TypedDict):
+class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict):
     file_id: str
 
 
-class DownloadFileRequest(BaseModel):
+class FilesAPIRoutesDeleteFileRequest(BaseModel):
     file_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
diff --git a/src/mistralai/client/models/deletefileop.py b/src/mistralai/client/models/files_api_routes_download_fileop.py
similarity index 73%
rename from src/mistralai/client/models/deletefileop.py
rename to src/mistralai/client/models/files_api_routes_download_fileop.py
index 4feb7812..83de8e73 100644
--- a/src/mistralai/client/models/deletefileop.py
+++ b/src/mistralai/client/models/files_api_routes_download_fileop.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 286b4e583638
+# @generated-id: 8184ee3577c3
 
 from __future__ import annotations
 from mistralai.client.types import BaseModel
@@ -7,11 +7,11 @@
 from typing_extensions import Annotated, TypedDict
 
 
-class DeleteFileRequestTypedDict(TypedDict):
+class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict):
     file_id: str
 
 
-class DeleteFileRequest(BaseModel):
+class FilesAPIRoutesDownloadFileRequest(BaseModel):
     file_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
diff --git a/src/mistralai/client/models/getfilesignedurlop.py b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py
similarity index 51%
rename from src/mistralai/client/models/getfilesignedurlop.py
rename to src/mistralai/client/models/files_api_routes_get_signed_urlop.py
index 06ed79ee..64cd6ac5 100644
--- a/src/mistralai/client/models/getfilesignedurlop.py
+++ b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py
@@ -1,20 +1,21 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: 1aa50b81c8cf
+# @generated-id: 0a1a18c6431e
 
 from __future__ import annotations
-from mistralai.client.types import BaseModel
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
 from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata
+from pydantic import model_serializer
 from typing import Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class GetFileSignedURLRequestTypedDict(TypedDict):
+class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict):
     file_id: str
     expiry: NotRequired[int]
     r"""Number of hours before the url becomes invalid. Defaults to 24h"""
 
 
-class GetFileSignedURLRequest(BaseModel):
+class FilesAPIRoutesGetSignedURLRequest(BaseModel):
     file_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
@@ -24,3 +25,19 @@
         FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
     ] = 24
     r"""Number of hours before the url becomes invalid. Defaults to 24h"""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["expiry"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/listfilesop.py b/src/mistralai/client/models/files_api_routes_list_filesop.py
similarity index 70%
rename from src/mistralai/client/models/listfilesop.py
rename to src/mistralai/client/models/files_api_routes_list_filesop.py
index a9af5c70..b03e2f88 100644
--- a/src/mistralai/client/models/listfilesop.py
+++ b/src/mistralai/client/models/files_api_routes_list_filesop.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: e5bd46ac0145
+# @generated-id: b2e92f2a29b4
 
 from __future__ import annotations
 from .filepurpose import FilePurpose
@@ -18,7 +18,7 @@
 from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-class ListFilesRequestTypedDict(TypedDict):
+class FilesAPIRoutesListFilesRequestTypedDict(TypedDict):
     page: NotRequired[int]
     page_size: NotRequired[int]
     include_total: NotRequired[bool]
@@ -29,7 +29,7 @@
     mimetypes: NotRequired[Nullable[List[str]]]
 
 
-class ListFilesRequest(BaseModel):
+class FilesAPIRoutesListFilesRequest(BaseModel):
     page: Annotated[
         Optional[int],
         FieldMetadata(query=QueryParamMetadata(style="form", explode=True)),
@@ -72,39 +72,38 @@
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "page",
-            "page_size",
-            "include_total",
-            "sample_type",
-            "source",
-            "search",
-            "purpose",
-            "mimetypes",
-        ]
-        nullable_fields = ["sample_type", "source", "search", "purpose", "mimetypes"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "page",
+                "page_size",
+                "include_total",
+                "sample_type",
+                "source",
+                "search",
+                "purpose",
+                "mimetypes",
+            ]
+        )
+        nullable_fields = set(
+            ["sample_type", "source", "search", "purpose", "mimetypes"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
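The renamed request model above also carries the new nullable `mimetypes` filter. A hedged sketch of how it would surface through the client (the entry point and keyword passing follow the SDK's usual conventions and are assumed, not verified against this patch):

```python
# Sketch only: assumes the usual Speakeasy-style client entry point.
import os

from mistralai import Mistral

with Mistral(api_key=os.environ["MISTRAL_API_KEY"]) as client:
    # mimetypes narrows the listing to files of the given MIME types.
    files = client.files.list(page=0, page_size=20, mimetypes=["application/json"])
    print(files)
```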
diff --git a/src/mistralai/client/models/retrievefileop.py b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py
similarity index 73%
rename from src/mistralai/client/models/retrievefileop.py
rename to src/mistralai/client/models/files_api_routes_retrieve_fileop.py
index edd50e57..5f8de05f 100644
--- a/src/mistralai/client/models/retrievefileop.py
+++ b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py
@@ -1,5 +1,5 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: ee73efdf9180
+# @generated-id: 5d5dbb8d5f7a
 
 from __future__ import annotations
 from mistralai.client.types import BaseModel
@@ -7,11 +7,11 @@
 from typing_extensions import Annotated, TypedDict
 
 
-class RetrieveFileRequestTypedDict(TypedDict):
+class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict):
     file_id: str
 
 
-class RetrieveFileRequest(BaseModel):
+class FilesAPIRoutesRetrieveFileRequest(BaseModel):
     file_id: Annotated[
         str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))
     ]
diff --git a/src/mistralai/client/models/uploadfileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py
similarity index 70%
rename from src/mistralai/client/models/uploadfileop.py
rename to src/mistralai/client/models/files_api_routes_upload_fileop.py
index 50848f0b..54ff4e49 100644
--- a/src/mistralai/client/models/uploadfileop.py
+++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py
@@ -1,11 +1,12 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: d67619670938
+# @generated-id: f13b84de6fa7
 
 from __future__ import annotations
 from .file import File, FileTypedDict
 from .filepurpose import FilePurpose
-from mistralai.client.types import BaseModel
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
 from mistralai.client.utils import FieldMetadata, MultipartFormMetadata
+from pydantic import model_serializer
 from typing import Optional
 from typing_extensions import Annotated, NotRequired, TypedDict
@@ -39,3 +40,19 @@ class MultiPartBodyParams(BaseModel):
     """
 
     purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["purpose"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py
index cbe9b0d1..e99066a9 100644
--- a/src/mistralai/client/models/fileschema.py
+++ b/src/mistralai/client/models/fileschema.py
@@ -66,30 +66,31 @@ class FileSchema(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["num_lines", "mimetype", "signature"]
-        nullable_fields = ["num_lines", "mimetype", "signature"]
-        null_default_fields = []
-
+        optional_fields = set(["num_lines", "mimetype", "signature"])
+        nullable_fields = set(["num_lines", "mimetype", "signature"])
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
+        return m
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-        return m
+
+try:
+    FileSchema.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/fimcompletionrequest.py b/src/mistralai/client/models/fimcompletionrequest.py
index e2f60327..ea877213 100644
--- a/src/mistralai/client/models/fimcompletionrequest.py
+++ b/src/mistralai/client/models/fimcompletionrequest.py
@@ -85,47 +85,46 @@ class FIMCompletionRequest(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "temperature",
-            "top_p",
-            "max_tokens",
-            "stream",
-            "stop",
-            "random_seed",
-            "metadata",
-            "suffix",
-            "min_tokens",
-        ]
-        nullable_fields = [
-            "temperature",
-            "max_tokens",
-            "random_seed",
-            "metadata",
-            "suffix",
-            "min_tokens",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "temperature",
+                "top_p",
+                "max_tokens",
+                "stream",
+                "stop",
+                "random_seed",
+                "metadata",
+                "suffix",
+                "min_tokens",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "temperature",
+                "max_tokens",
+                "random_seed",
+                "metadata",
+                "suffix",
+                "min_tokens",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
diff --git a/src/mistralai/client/models/fimcompletionstreamrequest.py b/src/mistralai/client/models/fimcompletionstreamrequest.py
index 480ed17a..e80efc09 100644
--- a/src/mistralai/client/models/fimcompletionstreamrequest.py
+++ b/src/mistralai/client/models/fimcompletionstreamrequest.py
@@ -83,47 +83,46 @@ class FIMCompletionStreamRequest(BaseModel):
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "temperature",
-            "top_p",
-            "max_tokens",
-            "stream",
-            "stop",
-            "random_seed",
-            "metadata",
-            "suffix",
-            "min_tokens",
-        ]
-        nullable_fields = [
-            "temperature",
-            "max_tokens",
-            "random_seed",
-            "metadata",
-            "suffix",
-            "min_tokens",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "temperature",
+                "top_p",
+                "max_tokens",
+                "stream",
+                "stop",
+                "random_seed",
+                "metadata",
+                "suffix",
+                "min_tokens",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "temperature",
+                "max_tokens",
+                "random_seed",
+                "metadata",
+                "suffix",
+                "min_tokens",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
-
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
-
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
         return m
diff --git a/src/mistralai/client/models/finetunedmodelcapabilities.py b/src/mistralai/client/models/finetunedmodelcapabilities.py
new file mode 100644
index 00000000..2f4cca0b
--- /dev/null
+++ b/src/mistralai/client/models/finetunedmodelcapabilities.py
@@ -0,0 +1,52 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+# @generated-id: 475c805eab95
+
+from __future__ import annotations
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class FineTunedModelCapabilitiesTypedDict(TypedDict):
+    completion_chat: NotRequired[bool]
+    completion_fim: NotRequired[bool]
+    function_calling: NotRequired[bool]
+    fine_tuning: NotRequired[bool]
+    classification: NotRequired[bool]
+
+
+class FineTunedModelCapabilities(BaseModel):
+    completion_chat: Optional[bool] = True
+
+    completion_fim: Optional[bool] = False
+
+    function_calling: Optional[bool] = False
+
+    fine_tuning: Optional[bool] = False
+
+    classification: Optional[bool] = False
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(
+            [
+                "completion_chat",
+                "completion_fim",
+                "function_calling",
+                "fine_tuning",
+                "classification",
+            ]
+        )
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
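Since `FineTunedModelCapabilities` gives every flag a concrete default rather than UNSET, the new serializer keeps all of them in the payload; only None-valued optionals are dropped. A hedged sketch (import path taken from this patch; the printed result is the expected behavior, not captured output):

```python
# Sketch only: assumes the mistralai.client package from this patch.
from mistralai.client.models.finetunedmodelcapabilities import (
    FineTunedModelCapabilities,
)

caps = FineTunedModelCapabilities()
print(caps.model_dump())
# expected: {'completion_chat': True, 'completion_fim': False,
#            'function_calling': False, 'fine_tuning': False,
#            'classification': False}
```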
diff --git a/src/mistralai/client/models/ftmodelcapabilitiesout.py b/src/mistralai/client/models/ftmodelcapabilitiesout.py
deleted file mode 100644
index 42269b78..00000000
--- a/src/mistralai/client/models/ftmodelcapabilitiesout.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
-# @generated-id: f70517be97d4
-
-from __future__ import annotations
-from mistralai.client.types import BaseModel
-from typing import Optional
-from typing_extensions import NotRequired, TypedDict
-
-
-class FTModelCapabilitiesOutTypedDict(TypedDict):
-    completion_chat: NotRequired[bool]
-    completion_fim: NotRequired[bool]
-    function_calling: NotRequired[bool]
-    fine_tuning: NotRequired[bool]
-    classification: NotRequired[bool]
-
-
-class FTModelCapabilitiesOut(BaseModel):
-    completion_chat: Optional[bool] = True
-
-    completion_fim: Optional[bool] = False
-
-    function_calling: Optional[bool] = False
-
-    fine_tuning: Optional[bool] = False
-
-    classification: Optional[bool] = False
diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py
index 570e95e2..2c26ff2f 100644
--- a/src/mistralai/client/models/ftmodelcard.py
+++ b/src/mistralai/client/models/ftmodelcard.py
@@ -71,7 +71,7 @@ class FTModelCard(BaseModel):
 
     default_model_temperature: OptionalNullable[float] = UNSET
 
-    TYPE: Annotated[
+    type: Annotated[
         Annotated[Literal["fine-tuned"], AfterValidator(validate_const("fine-tuned"))],
         pydantic.Field(alias="type"),
     ] = "fine-tuned"
@@ -80,48 +80,53 @@
 
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = [
-            "object",
-            "created",
-            "owned_by",
-            "name",
-            "description",
-            "max_context_length",
-            "aliases",
-            "deprecation",
-            "deprecation_replacement_model",
-            "default_model_temperature",
-            "archived",
-        ]
-        nullable_fields = [
-            "name",
-            "description",
-            "deprecation",
-            "deprecation_replacement_model",
-            "default_model_temperature",
-        ]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "object",
+                "created",
+                "owned_by",
+                "name",
+                "description",
+                "max_context_length",
+                "aliases",
+                "deprecation",
+                "deprecation_replacement_model",
+                "default_model_temperature",
+                "archived",
+            ]
+        )
+        nullable_fields = set(
+            [
+                "name",
+                "description",
+                "deprecation",
+                "deprecation_replacement_model",
+                "default_model_temperature",
+            ]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not None
+                    or k not in optional_fields
+                    or is_nullable_and_explicitly_set
+                ):
+                    m[k] = val
 
-            optional_nullable = k in optional_fields and k in nullable_fields
-            is_set = (
-                self.__pydantic_fields_set__.intersection({n})
-                or k in null_default_fields
-            ) # pylint: disable=no-member
+        return m
 
-            if val is not None and val != UNSET_SENTINEL:
-                m[k] = val
-            elif val != UNSET_SENTINEL and (
-                not k in optional_fields or (optional_nullable and is_set)
-            ):
-                m[k] = val
-        return m
+
+try:
+    FTModelCard.model_rebuild()
+except NameError:
+    pass
diff --git a/src/mistralai/client/models/function.py b/src/mistralai/client/models/function.py
index 3632c1af..1da1dcc9 100644
--- a/src/mistralai/client/models/function.py
+++ b/src/mistralai/client/models/function.py
@@ -2,7 +2,8 @@
 # @generated-id: 32275a9d8fee
 
 from __future__ import annotations
-from mistralai.client.types import BaseModel
+from mistralai.client.types import BaseModel, UNSET_SENTINEL
+from pydantic import model_serializer
 from typing import Any, Dict, Optional
 from typing_extensions import NotRequired, TypedDict
@@ -22,3 +23,19 @@ class Function(BaseModel):
 
     description: Optional[str] = None
 
     strict: Optional[bool] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = set(["description", "strict"])
+        serialized = handler(self)
+        m = {}
+
+        for n, f in type(self).model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+
+            if val != UNSET_SENTINEL:
+                if val is not None or k not in optional_fields:
+                    m[k] = val
+
+        return m
diff --git a/src/mistralai/client/models/functioncallentry.py b/src/mistralai/client/models/functioncallentry.py
index 6ada1d35..d05fad85 100644
--- a/src/mistralai/client/models/functioncallentry.py
+++ b/src/mistralai/client/models/functioncallentry.py
@@ -13,27 +13,38 @@
     OptionalNullable,
     UNSET,
     UNSET_SENTINEL,
+    UnrecognizedStr,
 )
+from mistralai.client.utils import validate_const
+import pydantic
 from pydantic import model_serializer
-from typing import Literal, Optional
-from typing_extensions import NotRequired, TypedDict
+from pydantic.functional_validators import AfterValidator
+from typing import Literal, Optional, Union
+from typing_extensions import Annotated, NotRequired, TypedDict
 
 
-FunctionCallEntryObject = Literal["entry",]
-
-
-FunctionCallEntryType = Literal["function.call",]
+FunctionCallEntryConfirmationStatus = Union[
+    Literal[
+        "pending",
+        "allowed",
+        "denied",
+    ],
+    UnrecognizedStr,
+]
 
 
 class FunctionCallEntryTypedDict(TypedDict):
     tool_call_id: str
     name: str
     arguments: FunctionCallEntryArgumentsTypedDict
-    object: NotRequired[FunctionCallEntryObject]
-    type: NotRequired[FunctionCallEntryType]
+    object: Literal["entry"]
+    type: Literal["function.call"]
     created_at: NotRequired[datetime]
     completed_at: NotRequired[Nullable[datetime]]
+    agent_id: NotRequired[Nullable[str]]
+    model: NotRequired[Nullable[str]]
     id: NotRequired[str]
+    confirmation_status: NotRequired[Nullable[FunctionCallEntryConfirmationStatus]]
 
 
 class FunctionCallEntry(BaseModel):
@@ -43,42 +54,71 @@
     name: str
 
     arguments: FunctionCallEntryArguments
 
-    object: Optional[FunctionCallEntryObject] = "entry"
+    object: Annotated[
+        Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))],
+        pydantic.Field(alias="object"),
+    ] = "entry"
 
-    type: Optional[FunctionCallEntryType] = "function.call"
+    type: Annotated[
+        Annotated[
+            Optional[Literal["function.call"]],
+            AfterValidator(validate_const("function.call")),
+        ],
+        pydantic.Field(alias="type"),
+    ] = "function.call"
 
     created_at: Optional[datetime] = None
 
     completed_at: OptionalNullable[datetime] = UNSET
 
+    agent_id: OptionalNullable[str] = UNSET
+
+    model: OptionalNullable[str] = UNSET
+
     id: Optional[str] = None
 
+    confirmation_status: OptionalNullable[FunctionCallEntryConfirmationStatus] = UNSET
+
     @model_serializer(mode="wrap")
     def serialize_model(self, handler):
-        optional_fields = ["object", "type", "created_at", "completed_at", "id"]
-        nullable_fields = ["completed_at"]
-        null_default_fields = []
-
+        optional_fields = set(
+            [
+                "object",
+                "type",
+                "created_at",
+                "completed_at",
+                "agent_id",
+                "model",
+                "id",
+                "confirmation_status",
+            ]
+        )
+        nullable_fields = set(
+            ["completed_at", "agent_id", "model", "confirmation_status"]
+        )
         serialized = handler(self)
-
         m = {}
 
         for n, f in type(self).model_fields.items():
             k = f.alias or n
             val = serialized.get(k)
-            serialized.pop(k, None)
+            is_nullable_and_explicitly_set = (
+                k in nullable_fields
+                and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member
+            )
+
+            if val != UNSET_SENTINEL:
+                if (
+                    val is not
None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + FunctionCallEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py index 5d871a0e..849eed76 100644 --- a/src/mistralai/client/models/functioncallevent.py +++ b/src/mistralai/client/models/functioncallevent.py @@ -3,14 +3,32 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional +from typing import Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypedDict +FunctionCallEventConfirmationStatus = Union[ + Literal[ + "pending", + "allowed", + "denied", + ], + UnrecognizedStr, +] + + class FunctionCallEventTypedDict(TypedDict): id: str name: str @@ -19,6 +37,9 @@ class FunctionCallEventTypedDict(TypedDict): type: Literal["function.call.delta"] created_at: NotRequired[datetime] output_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + confirmation_status: NotRequired[Nullable[FunctionCallEventConfirmationStatus]] class FunctionCallEvent(BaseModel): @@ -30,7 +51,7 @@ class FunctionCallEvent(BaseModel): arguments: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["function.call.delta"], AfterValidator(validate_const("function.call.delta")), @@ -41,3 +62,42 @@ class FunctionCallEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + confirmation_status: OptionalNullable[FunctionCallEventConfirmationStatus] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["created_at", "output_index", "model", "agent_id", "confirmation_status"] + ) + nullable_fields = set(["model", "agent_id", "confirmation_status"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FunctionCallEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functionresultentry.py b/src/mistralai/client/models/functionresultentry.py index ca73cbb7..01e2e36f 100644 --- a/src/mistralai/client/models/functionresultentry.py +++ b/src/mistralai/client/models/functionresultentry.py @@ -10,22 +10,19 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from 
pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionResultEntryObject = Literal["entry",] - - -FunctionResultEntryType = Literal["function.result",] +from typing_extensions import Annotated, NotRequired, TypedDict class FunctionResultEntryTypedDict(TypedDict): tool_call_id: str result: str - object: NotRequired[FunctionResultEntryObject] - type: NotRequired[FunctionResultEntryType] + object: Literal["entry"] + type: Literal["function.result"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] @@ -36,9 +33,18 @@ class FunctionResultEntry(BaseModel): result: str - object: Optional[FunctionResultEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[FunctionResultEntryType] = "function.result" + type: Annotated[ + Annotated[ + Optional[Literal["function.result"]], + AfterValidator(validate_const("function.result")), + ], + pydantic.Field(alias="type"), + ] = "function.result" created_at: Optional[datetime] = None @@ -48,30 +54,31 @@ class FunctionResultEntry(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set(["object", "type", "created_at", "completed_at", "id"]) + nullable_fields = set(["completed_at"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + FunctionResultEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py index 13b04496..eae87264 100644 --- a/src/mistralai/client/models/functiontool.py +++ b/src/mistralai/client/models/functiontool.py @@ -19,7 +19,13 @@ class FunctionToolTypedDict(TypedDict): class FunctionTool(BaseModel): function: Function - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["function"], AfterValidator(validate_const("function"))], pydantic.Field(alias="type"), ] = "function" + + +try: + FunctionTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/getagentop.py b/src/mistralai/client/models/getagentop.py deleted file mode 100644 index 55d8fe68..00000000 --- a/src/mistralai/client/models/getagentop.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 5a28bb1e727e - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -GetAgentAgentVersionTypedDict = TypeAliasType( - "GetAgentAgentVersionTypedDict", Union[int, str] -) - - -GetAgentAgentVersion = TypeAliasType("GetAgentAgentVersion", Union[int, str]) - - -class GetAgentRequestTypedDict(TypedDict): - agent_id: str - agent_version: NotRequired[Nullable[GetAgentAgentVersionTypedDict]] - - -class GetAgentRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - agent_version: Annotated[ - OptionalNullable[GetAgentAgentVersion], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["agent_version"] - nullable_fields = ["agent_version"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/getdocumenttextcontentop.py b/src/mistralai/client/models/getdocumenttextcontentop.py deleted file mode 100644 index 8a7b4aae..00000000 --- a/src/mistralai/client/models/getdocumenttextcontentop.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: ba23717093ef - -from __future__ import annotations -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class GetDocumentTextContentRequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class GetDocumentTextContentRequest(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/client/models/retrievefileout.py b/src/mistralai/client/models/getfileresponse.py similarity index 69% rename from src/mistralai/client/models/retrievefileout.py rename to src/mistralai/client/models/getfileresponse.py index 2abf2161..f625c153 100644 --- a/src/mistralai/client/models/retrievefileout.py +++ b/src/mistralai/client/models/getfileresponse.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 8bb5859aa0d0 +# @generated-id: 81919086e371 from __future__ import annotations from .filepurpose import FilePurpose @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class RetrieveFileOutTypedDict(TypedDict): +class GetFileResponseTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" object: str @@ -37,7 +37,7 @@ class RetrieveFileOutTypedDict(TypedDict): signature: NotRequired[Nullable[str]] -class RetrieveFileOut(BaseModel): +class GetFileResponse(BaseModel): id: str r"""The unique identifier of the file.""" @@ -69,30 +69,31 @@ class RetrieveFileOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + GetFileResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/getfinetuningjobop.py b/src/mistralai/client/models/getfinetuningjobop.py deleted file mode 100644 index 1fb732f4..00000000 --- a/src/mistralai/client/models/getfinetuningjobop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: afe997f96d69 - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class GetFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to analyse.""" - - -class GetFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to analyse.""" - - -GetFineTuningJobResponseTypedDict = TypeAliasType( - "GetFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -GetFineTuningJobResponse = Annotated[ - Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], - Field(discriminator="JOB_TYPE"), -] -r"""OK""" diff --git a/src/mistralai/client/models/filesignedurl.py b/src/mistralai/client/models/getsignedurlresponse.py similarity index 65% rename from src/mistralai/client/models/filesignedurl.py rename to src/mistralai/client/models/getsignedurlresponse.py index 53dff812..4ba95894 100644 --- a/src/mistralai/client/models/filesignedurl.py +++ b/src/mistralai/client/models/getsignedurlresponse.py @@ -1,14 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: a1754c725163 +# @generated-id: cee4e4197372 from __future__ import annotations from mistralai.client.types import BaseModel from typing_extensions import TypedDict -class FileSignedURLTypedDict(TypedDict): +class GetSignedURLResponseTypedDict(TypedDict): url: str -class FileSignedURL(BaseModel): +class GetSignedURLResponse(BaseModel): url: str diff --git a/src/mistralai/client/models/githubrepositoryout.py b/src/mistralai/client/models/githubrepository.py similarity index 59% rename from src/mistralai/client/models/githubrepositoryout.py rename to src/mistralai/client/models/githubrepository.py index 514df01c..84b01078 100644 --- a/src/mistralai/client/models/githubrepositoryout.py +++ b/src/mistralai/client/models/githubrepository.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: d2434a167623 +# @generated-id: 4bc83ce18378 from __future__ import annotations from mistralai.client.types import ( @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class GithubRepositoryOutTypedDict(TypedDict): +class GithubRepositoryTypedDict(TypedDict): name: str owner: str commit_id: str @@ -26,14 +26,14 @@ class GithubRepositoryOutTypedDict(TypedDict): weight: NotRequired[float] -class GithubRepositoryOut(BaseModel): +class GithubRepository(BaseModel): name: str owner: str commit_id: str - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["github"], AfterValidator(validate_const("github"))], pydantic.Field(alias="type"), ] = "github" @@ -44,30 +44,31 @@ class GithubRepositoryOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - + optional_fields = set(["ref", "weight"]) + nullable_fields = set(["ref"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + GithubRepository.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py index e55389c3..38bcc208 100644 --- a/src/mistralai/client/models/githubrepositoryin.py +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -33,7 +33,7 @@ class GithubRepositoryIn(BaseModel): token: str - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["github"], AfterValidator(validate_const("github"))], pydantic.Field(alias="type"), ] = "github" @@ -44,30 +44,31 @@ class GithubRepositoryIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - + optional_fields = set(["ref", "weight"]) + nullable_fields = set(["ref"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + GithubRepositoryIn.model_rebuild() +except 
NameError: + pass diff --git a/src/mistralai/client/models/imagedetail.py b/src/mistralai/client/models/imagedetail.py new file mode 100644 index 00000000..1982d357 --- /dev/null +++ b/src/mistralai/client/models/imagedetail.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c1084b549abb + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py index 680c6ce2..c1789b18 100644 --- a/src/mistralai/client/models/imagegenerationtool.py +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -2,23 +2,65 @@ # @generated-id: e1532275faa0 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class ImageGenerationToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["image_generation"] class ImageGenerationTool(BaseModel): - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[ Literal["image_generation"], AfterValidator(validate_const("image_generation")), ], pydantic.Field(alias="type"), ] = "image_generation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ImageGenerationTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/imageurl.py b/src/mistralai/client/models/imageurl.py index 4ff13b1c..ac1030f5 100644 --- a/src/mistralai/client/models/imageurl.py +++ b/src/mistralai/client/models/imageurl.py @@ -2,6 +2,7 @@ # @generated-id: e4bbf5881fbf from __future__ import annotations +from .imagedetail import ImageDetail from mistralai.client.types import ( BaseModel, Nullable, @@ -15,40 +16,35 @@ class ImageURLTypedDict(TypedDict): url: str - detail: NotRequired[Nullable[str]] + detail: NotRequired[Nullable[ImageDetail]] class ImageURL(BaseModel): url: str - detail: OptionalNullable[str] = UNSET + detail: OptionalNullable[ImageDetail] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) serialized = handler(self) - m = {} 
for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py index 993185cc..7134b46e 100644 --- a/src/mistralai/client/models/imageurlchunk.py +++ b/src/mistralai/client/models/imageurlchunk.py @@ -3,9 +3,13 @@ from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, TypeAliasType, TypedDict ImageURLUnionTypedDict = TypeAliasType( @@ -16,14 +20,11 @@ ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) -ImageURLChunkType = Literal["image_url",] - - class ImageURLChunkTypedDict(TypedDict): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" image_url: ImageURLUnionTypedDict - type: NotRequired[ImageURLChunkType] + type: Literal["image_url"] class ImageURLChunk(BaseModel): @@ -31,4 +32,31 @@ class ImageURLChunk(BaseModel): image_url: ImageURLUnion - type: Optional[ImageURLChunkType] = "image_url" + type: Annotated[ + Annotated[ + Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) + ], + pydantic.Field(alias="type"), + ] = "image_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/inputentries.py b/src/mistralai/client/models/inputentries.py index dc989295..e2da5a80 100644 --- a/src/mistralai/client/models/inputentries.py +++ b/src/mistralai/client/models/inputentries.py @@ -17,10 +17,10 @@ Union[ FunctionResultEntryTypedDict, MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, ], ) @@ -30,9 +30,9 @@ Union[ FunctionResultEntry, MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, MessageOutputEntry, AgentHandoffEntry, + ToolExecutionEntry, + FunctionCallEntry, ], ) diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py index cfcdeb3d..9ecd7f48 100644 --- 
a/src/mistralai/client/models/inputs.py +++ b/src/mistralai/client/models/inputs.py @@ -2,54 +2,16 @@ # @generated-id: 84a8007518c7 from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from .instructrequest import InstructRequest, InstructRequestTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -InputsMessageTypedDict = TypeAliasType( - "InputsMessageTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -InputsMessage = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -class InstructRequestInputsTypedDict(TypedDict): - messages: List[InputsMessageTypedDict] - - -class InstructRequestInputs(BaseModel): - messages: List[InputsMessage] +from typing_extensions import TypeAliasType InputsTypedDict = TypeAliasType( - "InputsTypedDict", - Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], + "InputsTypedDict", Union[InstructRequestTypedDict, List[InstructRequestTypedDict]] ) r"""Chat to classify""" -Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) +Inputs = TypeAliasType("Inputs", Union[InstructRequest, List[InstructRequest]]) r"""Chat to classify""" diff --git a/src/mistralai/client/models/jobmetadataout.py b/src/mistralai/client/models/jobmetadata.py similarity index 52% rename from src/mistralai/client/models/jobmetadataout.py rename to src/mistralai/client/models/jobmetadata.py index 1d386539..f6e96fa1 100644 --- a/src/mistralai/client/models/jobmetadataout.py +++ b/src/mistralai/client/models/jobmetadata.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 805f41e3292a +# @generated-id: cfbdde7fc0a2 from __future__ import annotations from mistralai.client.types import ( @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -class JobMetadataOutTypedDict(TypedDict): +class JobMetadataTypedDict(TypedDict): expected_duration_seconds: NotRequired[Nullable[int]] cost: NotRequired[Nullable[float]] cost_currency: NotRequired[Nullable[str]] @@ -23,7 +23,7 @@ class JobMetadataOutTypedDict(TypedDict): estimated_start_time: NotRequired[Nullable[int]] -class JobMetadataOut(BaseModel): +class JobMetadata(BaseModel): expected_duration_seconds: OptionalNullable[int] = UNSET cost: OptionalNullable[float] = UNSET @@ -40,46 +40,45 @@ class JobMetadataOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - null_default_fields = [] - + optional_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + ) + nullable_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/cancelbatchjobop.py b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py similarity index 72% rename from src/mistralai/client/models/cancelbatchjobop.py rename to src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py index cd94ee86..de2e6347 100644 --- a/src/mistralai/client/models/cancelbatchjobop.py +++ b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: cebac10b56a9 +# @generated-id: b56cb6c17c95 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class CancelBatchJobRequestTypedDict(TypedDict): +class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): job_id: str -class CancelBatchJobRequest(BaseModel): +class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getbatchjobop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py similarity index 56% rename from src/mistralai/client/models/getbatchjobop.py rename to src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py index 792c3e21..d779e1d9 100644 --- a/src/mistralai/client/models/getbatchjobop.py +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 443103fe3b88 +# @generated-id: 36b5a6b3ceee from __future__ import annotations from mistralai.client.types import ( @@ -14,12 +14,12 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class GetBatchJobRequestTypedDict(TypedDict): +class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): job_id: str inline: NotRequired[Nullable[bool]] -class GetBatchJobRequest(BaseModel): +class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -31,30 +31,25 @@ class GetBatchJobRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["inline"] - nullable_fields = ["inline"] - null_default_fields = [] - + optional_fields = set(["inline"]) + nullable_fields = set(["inline"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/listbatchjobsop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py similarity index 71% rename from src/mistralai/client/models/listbatchjobsop.py rename to src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py index 5322df81..89ac3c93 100644 --- a/src/mistralai/client/models/listbatchjobsop.py +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: f49af453f5e6 +# @generated-id: d8f0af99c94d from __future__ import annotations from .batchjobstatus import BatchJobStatus @@ -23,7 +23,7 @@ ] -class ListBatchJobsRequestTypedDict(TypedDict): +class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] model: NotRequired[Nullable[str]] @@ -35,7 +35,7 @@ class ListBatchJobsRequestTypedDict(TypedDict): order_by: NotRequired[OrderBy] -class ListBatchJobsRequest(BaseModel): +class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -83,40 +83,39 @@ class ListBatchJobsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "agent_id", - "metadata", - "created_after", - "created_by_me", - "status", - "order_by", - ] - nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] - null_default_fields = [] - + optional_fields = set( + [ + "page", + "page_size", + "model", + "agent_id", + "metadata", + "created_after", + "created_by_me", + "status", + "order_by", + ] + ) + nullable_fields = set( + ["model", "agent_id", "metadata", "created_after", "status"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/archivemodelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py similarity index 73% rename from src/mistralai/client/models/archivemodelop.py rename to src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py index 30b4a9bd..9fa99837 100644 --- a/src/mistralai/client/models/archivemodelop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: beefa1df3b7c +# @generated-id: 34f89d2af0ec from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class ArchiveModelRequestTypedDict(TypedDict): +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to archive.""" -class ArchiveModelRequest(BaseModel): +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py new file mode 100644 index 00000000..56fa5340 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d175c6e32ecb + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to cancel.""" + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to cancel.""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningCancelFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_CANCEL_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[ + str, Any +] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_CANCEL_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py new file mode 100644 index 00000000..db857f7d --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 81651291187a + +from __future__ import annotations +from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobTypedDict, +) +from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobTypedDict, +) +from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ResponseTypedDict = TypeAliasType( + "ResponseTypedDict", + Union[ClassifierFineTuningJobTypedDict, CompletionFineTuningJobTypedDict], +) + + +class UnknownResponse(BaseModel): + r"""A Response variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJob, + "completion": CompletionFineTuningJob, +} + + +Response = Annotated[ + Union[ClassifierFineTuningJob, CompletionFineTuningJob, UnknownResponse], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_RESPONSE_VARIANTS, + unknown_cls=UnknownResponse, + union_name="Response", + ) + ), +] + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + Union[LegacyJobMetadataTypedDict, ResponseTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + Union[LegacyJobMetadata, Response], +) +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py new file mode 100644 index 00000000..ddd9c189 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d910fd8fe2d6 + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to analyse.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to analyse.""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningGetFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_GET_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_GET_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningGetFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/listfinetuningjobsop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py similarity index 75% rename from src/mistralai/client/models/listfinetuningjobsop.py rename to src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index 8712c3fa..ec80a158 100644 --- a/src/mistralai/client/models/listfinetuningjobsop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: b77fe203b929 +# @generated-id: cf43028824bf from __future__ import annotations from datetime import datetime @@ -16,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -ListFineTuningJobsStatus = Literal[ +JobsAPIRoutesFineTuningGetFineTuningJobsStatus = Literal[ "QUEUED", "STARTED", "VALIDATING", @@ -31,7 +31,7 @@ r"""The current job state to filter on. When set, the other results are not displayed.""" -class ListFineTuningJobsRequestTypedDict(TypedDict): +class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): page: NotRequired[int] r"""The page number of the results to be returned.""" page_size: NotRequired[int] @@ -43,7 +43,7 @@ class ListFineTuningJobsRequestTypedDict(TypedDict): created_before: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: NotRequired[Nullable[ListFineTuningJobsStatus]] + status: NotRequired[Nullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus]] r"""The current job state to filter on. When set, the other results are not displayed.""" wandb_project: NotRequired[Nullable[str]] r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" @@ -53,7 +53,7 @@ class ListFineTuningJobsRequestTypedDict(TypedDict): r"""The model suffix to filter on. When set, the other results are not displayed.""" -class ListFineTuningJobsRequest(BaseModel): +class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -90,7 +90,7 @@ class ListFineTuningJobsRequest(BaseModel): r"""When set, only return results for jobs created by the API caller. 
Other results are not displayed.""" status: Annotated[ - OptionalNullable[ListFineTuningJobsStatus], + OptionalNullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET r"""The current job state to filter on. When set, the other results are not displayed.""" @@ -115,49 +115,48 @@ class ListFineTuningJobsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "created_after", - "created_before", - "created_by_me", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - nullable_fields = [ - "model", - "created_after", - "created_before", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - null_default_fields = [] - + optional_fields = set( + [ + "page", + "page_size", + "model", + "created_after", + "created_before", + "created_by_me", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + ) + nullable_fields = set( + [ + "model", + "created_after", + "created_before", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 00000000..cd25fa04 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e7ff4a4a4edb + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + +JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningStartFineTuningJobResponse variant the SDK doesn't recognize. Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_START_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_START_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningStartFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/unarchivemodelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py similarity index 73% rename from src/mistralai/client/models/unarchivemodelop.py rename to src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py index 1d68a06a..fd01fe69 100644 --- a/src/mistralai/client/models/unarchivemodelop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: eb18584fd78c +# @generated-id: 7cc1c80335a9 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class UnarchiveModelRequestTypedDict(TypedDict): +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to unarchive.""" -class UnarchiveModelRequest(BaseModel): +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py new file mode 100644 index 00000000..296070b4 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d9dc624aafd + +from __future__ import annotations +from .classifierfinetunedmodel import ( + ClassifierFineTunedModel, + ClassifierFineTunedModelTypedDict, +) +from .completionfinetunedmodel import ( + CompletionFineTunedModel, + CompletionFineTunedModelTypedDict, +) +from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to update.""" + update_model_request: UpdateModelRequestTypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to update.""" + + update_model_request: Annotated[ + UpdateModelRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + Union[CompletionFineTunedModelTypedDict, ClassifierFineTunedModelTypedDict], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningUpdateFineTunedModelResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + model_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_UPDATE_FINE_TUNED_MODEL_RESPONSE_VARIANTS: dict[ + str, Any +] = { + "classifier": ClassifierFineTunedModel, + "completion": CompletionFineTunedModel, +} + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ + Union[ + ClassifierFineTunedModel, + CompletionFineTunedModel, + UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="model_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_UPDATE_FINE_TUNED_MODEL_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + union_name="JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobsout.py b/src/mistralai/client/models/jobsout.py deleted file mode 100644 index a4127a5d..00000000 --- a/src/mistralai/client/models/jobsout.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 22e91e9631a9 - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import validate_const -import pydantic -from pydantic import Field -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -JobsOutDataTypedDict = TypeAliasType( - "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -JobsOutData = Annotated[ - Union[ClassifierJobOut, CompletionJobOut], Field(discriminator="JOB_TYPE") -] - - -class JobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[JobsOutDataTypedDict]] - object: Literal["list"] - - -class JobsOut(BaseModel): - total: int - - data: Optional[List[JobsOutData]] = None - - OBJECT: Annotated[ - Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], - pydantic.Field(alias="object"), - ] = "list" diff --git a/src/mistralai/client/models/jsonschema.py b/src/mistralai/client/models/jsonschema.py index 948c94ed..dfababa6 100644 --- a/src/mistralai/client/models/jsonschema.py +++ b/src/mistralai/client/models/jsonschema.py @@ -33,30 +33,31 @@ class JSONSchema(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return 
m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/legacyjobmetadataout.py b/src/mistralai/client/models/legacyjobmetadata.py similarity index 70% rename from src/mistralai/client/models/legacyjobmetadataout.py rename to src/mistralai/client/models/legacyjobmetadata.py index 4453c157..57576758 100644 --- a/src/mistralai/client/models/legacyjobmetadataout.py +++ b/src/mistralai/client/models/legacyjobmetadata.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 4f44aa38c864 +# @generated-id: 0330b8930f65 from __future__ import annotations from mistralai.client.types import ( @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class LegacyJobMetadataOutTypedDict(TypedDict): +class LegacyJobMetadataTypedDict(TypedDict): details: str expected_duration_seconds: NotRequired[Nullable[int]] r"""The approximated time (in seconds) for the fine-tuning process to complete.""" @@ -40,7 +40,7 @@ class LegacyJobMetadataOutTypedDict(TypedDict): object: Literal["job.metadata"] -class LegacyJobMetadataOut(BaseModel): +class LegacyJobMetadata(BaseModel): details: str expected_duration_seconds: OptionalNullable[int] = UNSET @@ -71,7 +71,7 @@ class LegacyJobMetadataOut(BaseModel): training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - OBJECT: Annotated[ + object: Annotated[ Annotated[ Optional[Literal["job.metadata"]], AfterValidator(validate_const("job.metadata")), @@ -81,52 +81,57 @@ class LegacyJobMetadataOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "deprecated", - "epochs", - "training_steps", - "object", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "epochs", - "training_steps", - ] - null_default_fields = [] - + optional_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "deprecated", + "epochs", + "training_steps", + "object", + ] + ) + nullable_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "epochs", + "training_steps", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + LegacyJobMetadata.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/deletelibraryop.py b/src/mistralai/client/models/libraries_delete_v1op.py similarity index 76% rename from src/mistralai/client/models/deletelibraryop.py rename to src/mistralai/client/models/libraries_delete_v1op.py index 5eb6fc31..893ab53b 100644 --- a/src/mistralai/client/models/deletelibraryop.py +++ b/src/mistralai/client/models/libraries_delete_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: cd0ce9bf8d51 +# @generated-id: b2e8bbd19baa from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class DeleteLibraryRequestTypedDict(TypedDict): +class LibrariesDeleteV1RequestTypedDict(TypedDict): library_id: str -class DeleteLibraryRequest(BaseModel): +class LibrariesDeleteV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletedocumentop.py b/src/mistralai/client/models/libraries_documents_delete_v1op.py similarity index 79% rename from src/mistralai/client/models/deletedocumentop.py rename to src/mistralai/client/models/libraries_documents_delete_v1op.py index 400070a4..0495832e 100644 --- a/src/mistralai/client/models/deletedocumentop.py +++ b/src/mistralai/client/models/libraries_documents_delete_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 62522db1ccf2 +# @generated-id: 81eb34382a3d from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteDocumentRequestTypedDict(TypedDict): +class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): library_id: str document_id: str -class DeleteDocumentRequest(BaseModel): +class LibrariesDocumentsDeleteV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py new file mode 100644 index 00000000..186baaed --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a7417ebd6040 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py new file mode 100644 index 00000000..ebcf85d7 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d4b7b47913ba + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/getdocumentop.py b/src/mistralai/client/models/libraries_documents_get_status_v1op.py similarity index 78% rename from src/mistralai/client/models/getdocumentop.py rename to src/mistralai/client/models/libraries_documents_get_status_v1op.py index d7b07db7..1f484787 100644 --- a/src/mistralai/client/models/getdocumentop.py +++ b/src/mistralai/client/models/libraries_documents_get_status_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: de89ff93d373 +# @generated-id: f314f73e909c from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetDocumentRequestTypedDict(TypedDict): +class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): library_id: str document_id: str -class GetDocumentRequest(BaseModel): +class LibrariesDocumentsGetStatusV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getdocumentextractedtextsignedurlop.py b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py similarity index 77% rename from src/mistralai/client/models/getdocumentextractedtextsignedurlop.py rename to src/mistralai/client/models/libraries_documents_get_text_content_v1op.py index 9a71181d..e0508d66 100644 --- a/src/mistralai/client/models/getdocumentextractedtextsignedurlop.py +++ b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 69099395d631 +# @generated-id: 1ca4e0c41321 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetDocumentExtractedTextSignedURLRequestTypedDict(TypedDict): +class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): library_id: str document_id: str -class GetDocumentExtractedTextSignedURLRequest(BaseModel): +class LibrariesDocumentsGetTextContentV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getdocumentstatusop.py b/src/mistralai/client/models/libraries_documents_get_v1op.py similarity index 80% rename from src/mistralai/client/models/getdocumentstatusop.py rename to src/mistralai/client/models/libraries_documents_get_v1op.py index 4206f593..857dfbe6 100644 --- a/src/mistralai/client/models/getdocumentstatusop.py +++ b/src/mistralai/client/models/libraries_documents_get_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: f1f40b8f003f +# @generated-id: 26ff35f0c69d from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetDocumentStatusRequestTypedDict(TypedDict): +class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): library_id: str document_id: str -class GetDocumentStatusRequest(BaseModel): +class LibrariesDocumentsGetV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listdocumentsop.py b/src/mistralai/client/models/libraries_documents_list_v1op.py similarity index 67% rename from src/mistralai/client/models/listdocumentsop.py rename to src/mistralai/client/models/libraries_documents_list_v1op.py index 0f7c4584..da7d793b 100644 --- a/src/mistralai/client/models/listdocumentsop.py +++ b/src/mistralai/client/models/libraries_documents_list_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 3e42bdc15383 +# @generated-id: 756f26de3cbe from __future__ import annotations from mistralai.client.types import ( @@ -15,7 +15,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class ListDocumentsRequestTypedDict(TypedDict): +class LibrariesDocumentsListV1RequestTypedDict(TypedDict): library_id: str search: NotRequired[Nullable[str]] page_size: NotRequired[int] @@ -25,7 +25,7 @@ class ListDocumentsRequestTypedDict(TypedDict): sort_order: NotRequired[str] -class ListDocumentsRequest(BaseModel): +class LibrariesDocumentsListV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -62,37 +62,34 @@ class ListDocumentsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "search", - "page_size", - "page", - "filters_attributes", - "sort_by", - "sort_order", - ] - nullable_fields = ["search", "filters_attributes"] - null_default_fields = [] - + optional_fields = set( + [ + "search", + "page_size", + "page", + "filters_attributes", + "sort_by", + "sort_order", + ] + ) + nullable_fields = set(["search", "filters_attributes"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/getdocumentsignedurlop.py b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py similarity index 78% rename from src/mistralai/client/models/getdocumentsignedurlop.py rename to src/mistralai/client/models/libraries_documents_reprocess_v1op.py index e5d56c54..a2f9ba2a 100644 --- a/src/mistralai/client/models/getdocumentsignedurlop.py +++ b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: b8d95511c6d1 +# @generated-id: dbbeb02fc336 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetDocumentSignedURLRequestTypedDict(TypedDict): +class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): library_id: str document_id: str -class GetDocumentSignedURLRequest(BaseModel): +class LibrariesDocumentsReprocessV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updatedocumentop.py b/src/mistralai/client/models/libraries_documents_update_v1op.py similarity index 64% rename from src/mistralai/client/models/updatedocumentop.py rename to src/mistralai/client/models/libraries_documents_update_v1op.py index 073f22a9..7ad4231f 100644 --- a/src/mistralai/client/models/updatedocumentop.py +++ b/src/mistralai/client/models/libraries_documents_update_v1op.py @@ -1,20 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: eee9ef317180 +# @generated-id: 734ba6c19f5f from __future__ import annotations -from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict +from .updatedocumentrequest import UpdateDocumentRequest, UpdateDocumentRequestTypedDict from mistralai.client.types import BaseModel from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict -class UpdateDocumentRequestTypedDict(TypedDict): +class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): library_id: str document_id: str - document_update_in: DocumentUpdateInTypedDict + update_document_request: UpdateDocumentRequestTypedDict -class UpdateDocumentRequest(BaseModel): +class LibrariesDocumentsUpdateV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -23,7 +23,7 @@ class UpdateDocumentRequest(BaseModel): str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] - document_update_in: Annotated[ - DocumentUpdateIn, + update_document_request: Annotated[ + UpdateDocumentRequest, FieldMetadata(request=RequestMetadata(media_type="application/json")), ] diff --git a/src/mistralai/client/models/uploaddocumentop.py b/src/mistralai/client/models/libraries_documents_upload_v1op.py similarity index 91% rename from src/mistralai/client/models/uploaddocumentop.py rename to src/mistralai/client/models/libraries_documents_upload_v1op.py index 2c957947..388633d1 100644 --- a/src/mistralai/client/models/uploaddocumentop.py +++ b/src/mistralai/client/models/libraries_documents_upload_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 0018fe7ff48c +# @generated-id: 744466971862 from __future__ import annotations from .file import File, FileTypedDict @@ -41,12 +41,12 @@ class DocumentUpload(BaseModel): """ -class UploadDocumentRequestTypedDict(TypedDict): +class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): library_id: str request_body: DocumentUploadTypedDict -class UploadDocumentRequest(BaseModel): +class LibrariesDocumentsUploadV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getlibraryop.py b/src/mistralai/client/models/libraries_get_v1op.py similarity index 77% rename from src/mistralai/client/models/getlibraryop.py rename to src/mistralai/client/models/libraries_get_v1op.py index bc0b4a23..7a51d605 100644 --- a/src/mistralai/client/models/getlibraryop.py +++ b/src/mistralai/client/models/libraries_get_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: c84a92e23a90 +# @generated-id: d493f39e7ebb from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class GetLibraryRequestTypedDict(TypedDict): +class LibrariesGetV1RequestTypedDict(TypedDict): library_id: str -class GetLibraryRequest(BaseModel): +class LibrariesGetV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updateorcreatelibraryaccessop.py b/src/mistralai/client/models/libraries_share_create_v1op.py similarity index 81% rename from src/mistralai/client/models/updateorcreatelibraryaccessop.py rename to src/mistralai/client/models/libraries_share_create_v1op.py index 1abe6eda..00ea7482 100644 --- a/src/mistralai/client/models/updateorcreatelibraryaccessop.py +++ b/src/mistralai/client/models/libraries_share_create_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: ec9b15418f5c +# @generated-id: feaacfd46dd3 from __future__ import annotations from .sharingin import SharingIn, SharingInTypedDict @@ -8,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class UpdateOrCreateLibraryAccessRequestTypedDict(TypedDict): +class LibrariesShareCreateV1RequestTypedDict(TypedDict): library_id: str sharing_in: SharingInTypedDict -class UpdateOrCreateLibraryAccessRequest(BaseModel): +class LibrariesShareCreateV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletelibraryaccessop.py b/src/mistralai/client/models/libraries_share_delete_v1op.py similarity index 83% rename from src/mistralai/client/models/deletelibraryaccessop.py rename to src/mistralai/client/models/libraries_share_delete_v1op.py index ca14c3ff..eca3f86a 100644 --- a/src/mistralai/client/models/deletelibraryaccessop.py +++ b/src/mistralai/client/models/libraries_share_delete_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: df80945bcf19 +# @generated-id: 7f3a679ca384 from __future__ import annotations from .sharingdelete import SharingDelete, SharingDeleteTypedDict @@ -8,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteLibraryAccessRequestTypedDict(TypedDict): +class LibrariesShareDeleteV1RequestTypedDict(TypedDict): library_id: str sharing_delete: SharingDeleteTypedDict -class DeleteLibraryAccessRequest(BaseModel): +class LibrariesShareDeleteV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listlibraryaccessesop.py b/src/mistralai/client/models/libraries_share_list_v1op.py similarity index 75% rename from src/mistralai/client/models/listlibraryaccessesop.py rename to src/mistralai/client/models/libraries_share_list_v1op.py index 2206310f..895a2590 100644 --- a/src/mistralai/client/models/listlibraryaccessesop.py +++ b/src/mistralai/client/models/libraries_share_list_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 581b332626b7 +# @generated-id: 8f0af379bf1c from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class ListLibraryAccessesRequestTypedDict(TypedDict): +class LibrariesShareListV1RequestTypedDict(TypedDict): library_id: str -class ListLibraryAccessesRequest(BaseModel): +class LibrariesShareListV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updatelibraryop.py b/src/mistralai/client/models/libraries_update_v1op.py similarity index 60% rename from src/mistralai/client/models/updatelibraryop.py rename to src/mistralai/client/models/libraries_update_v1op.py index c5a1ad30..54b0ab70 100644 --- a/src/mistralai/client/models/updatelibraryop.py +++ b/src/mistralai/client/models/libraries_update_v1op.py @@ -1,24 +1,24 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 4ba7acdb62c6 +# @generated-id: 92c8d4132252 from __future__ import annotations -from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict +from .updatelibraryrequest import UpdateLibraryRequest, UpdateLibraryRequestTypedDict from mistralai.client.types import BaseModel from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict -class UpdateLibraryRequestTypedDict(TypedDict): +class LibrariesUpdateV1RequestTypedDict(TypedDict): library_id: str - library_in_update: LibraryInUpdateTypedDict + update_library_request: UpdateLibraryRequestTypedDict -class UpdateLibraryRequest(BaseModel): +class LibrariesUpdateV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] - library_in_update: Annotated[ - LibraryInUpdate, + update_library_request: Annotated[ + UpdateLibraryRequest, FieldMetadata(request=RequestMetadata(media_type="application/json")), ] diff --git a/src/mistralai/client/models/libraryout.py b/src/mistralai/client/models/library.py similarity index 58% rename from src/mistralai/client/models/libraryout.py rename to src/mistralai/client/models/library.py index c7ab7b8d..1953b6fb 100644 --- a/src/mistralai/client/models/libraryout.py +++ b/src/mistralai/client/models/library.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 4e608c7aafc4 +# @generated-id: 028a34b08f9c from __future__ import annotations from datetime import datetime @@ -14,7 +14,7 @@ from typing_extensions import NotRequired, TypedDict -class LibraryOutTypedDict(TypedDict): +class LibraryTypedDict(TypedDict): id: str name: str created_at: datetime @@ -34,7 +34,7 @@ class LibraryOutTypedDict(TypedDict): r"""Generated Name""" -class LibraryOut(BaseModel): +class Library(BaseModel): id: str name: str @@ -70,48 +70,47 @@ class LibraryOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "emoji", - "description", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - "generated_name", - ] - nullable_fields = [ - "owner_id", - "chunk_size", - "emoji", - "description", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - "generated_name", - ] - null_default_fields = [] - + optional_fields = set( + [ + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + ) + nullable_fields = set( + [ + "owner_id", + "chunk_size", + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in 
nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/libraryinupdate.py b/src/mistralai/client/models/libraryinupdate.py deleted file mode 100644 index 328b2de3..00000000 --- a/src/mistralai/client/models/libraryinupdate.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 300a6bb02e6e - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryInUpdateTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class LibraryInUpdate(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/listbatchjobsresponse.py b/src/mistralai/client/models/listbatchjobsresponse.py new file mode 100644 index 00000000..35a348a1 --- /dev/null +++ b/src/mistralai/client/models/listbatchjobsresponse.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 99d94c86a871 + +from __future__ import annotations +from .batchjob import BatchJob, BatchJobTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ListBatchJobsResponseTypedDict(TypedDict): + total: int + data: NotRequired[List[BatchJobTypedDict]] + object: Literal["list"] + + +class ListBatchJobsResponse(BaseModel): + total: int + + data: Optional[List[BatchJob]] = None + + object: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data", "object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ListBatchJobsResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/listdocumentout.py b/src/mistralai/client/models/listdocumentsresponse.py similarity index 60% rename from src/mistralai/client/models/listdocumentout.py rename to src/mistralai/client/models/listdocumentsresponse.py index a636b3de..c48b8c05 100644 --- a/src/mistralai/client/models/listdocumentout.py +++ b/src/mistralai/client/models/listdocumentsresponse.py @@ -1,20 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: b2c96075ce00 +# @generated-id: f593d8e66833 from __future__ import annotations -from .documentout import DocumentOut, DocumentOutTypedDict +from .document import Document, DocumentTypedDict from .paginationinfo import PaginationInfo, PaginationInfoTypedDict from mistralai.client.types import BaseModel from typing import List from typing_extensions import TypedDict -class ListDocumentOutTypedDict(TypedDict): +class ListDocumentsResponseTypedDict(TypedDict): pagination: PaginationInfoTypedDict - data: List[DocumentOutTypedDict] + data: List[DocumentTypedDict] -class ListDocumentOut(BaseModel): +class ListDocumentsResponse(BaseModel): pagination: PaginationInfo - data: List[DocumentOut] + data: List[Document] diff --git a/src/mistralai/client/models/listfilesout.py b/src/mistralai/client/models/listfilesresponse.py similarity index 53% rename from src/mistralai/client/models/listfilesout.py rename to src/mistralai/client/models/listfilesresponse.py index 460822f7..10a60126 100644 --- a/src/mistralai/client/models/listfilesout.py +++ b/src/mistralai/client/models/listfilesresponse.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ae5fa21b141c +# @generated-id: 85d6d24c1a19 from __future__ import annotations from .fileschema import FileSchema, FileSchemaTypedDict @@ -15,13 +15,13 @@ from typing_extensions import NotRequired, TypedDict -class ListFilesOutTypedDict(TypedDict): +class ListFilesResponseTypedDict(TypedDict): data: List[FileSchemaTypedDict] object: str total: NotRequired[Nullable[int]] -class ListFilesOut(BaseModel): +class ListFilesResponse(BaseModel): data: List[FileSchema] object: str @@ -30,30 +30,25 @@ class ListFilesOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["total"] - nullable_fields = ["total"] - null_default_fields = [] - + optional_fields = set(["total"]) + nullable_fields = set(["total"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/listfinetuningjobsresponse.py b/src/mistralai/client/models/listfinetuningjobsresponse.py new file mode 100644 index 00000000..1e434c59 --- /dev/null +++ b/src/mistralai/client/models/listfinetuningjobsresponse.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 118e05dbfbbd + +from __future__ import annotations +from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobTypedDict, +) +from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ListFineTuningJobsResponseDataTypedDict = TypeAliasType( + "ListFineTuningJobsResponseDataTypedDict", + Union[ClassifierFineTuningJobTypedDict, CompletionFineTuningJobTypedDict], +) + + +class UnknownListFineTuningJobsResponseData(BaseModel): + r"""A ListFineTuningJobsResponseData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_LIST_FINE_TUNING_JOBS_RESPONSE_DATA_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJob, + "completion": CompletionFineTuningJob, +} + + +ListFineTuningJobsResponseData = Annotated[ + Union[ + ClassifierFineTuningJob, + CompletionFineTuningJob, + UnknownListFineTuningJobsResponseData, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_LIST_FINE_TUNING_JOBS_RESPONSE_DATA_VARIANTS, + unknown_cls=UnknownListFineTuningJobsResponseData, + union_name="ListFineTuningJobsResponseData", + ) + ), +] + + +class ListFineTuningJobsResponseTypedDict(TypedDict): + total: int + data: NotRequired[List[ListFineTuningJobsResponseDataTypedDict]] + object: Literal["list"] + + +class ListFineTuningJobsResponse(BaseModel): + total: int + + data: Optional[List[ListFineTuningJobsResponseData]] = None + + object: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data", "object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ListFineTuningJobsResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/listlibrariesresponse.py b/src/mistralai/client/models/listlibrariesresponse.py new file mode 100644 index 00000000..337fe105 --- /dev/null +++ b/src/mistralai/client/models/listlibrariesresponse.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: df556a618365 + +from __future__ import annotations +from .library import Library, LibraryTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListLibrariesResponseTypedDict(TypedDict): + data: List[LibraryTypedDict] + + +class ListLibrariesResponse(BaseModel): + data: List[Library] diff --git a/src/mistralai/client/models/listlibraryout.py b/src/mistralai/client/models/listlibraryout.py deleted file mode 100644 index 39fa459f..00000000 --- a/src/mistralai/client/models/listlibraryout.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: cb78c529e763 - -from __future__ import annotations -from .libraryout import LibraryOut, LibraryOutTypedDict -from mistralai.client.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListLibraryOutTypedDict(TypedDict): - data: List[LibraryOutTypedDict] - - -class ListLibraryOut(BaseModel): - data: List[LibraryOut] diff --git a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py index 63cf14e7..1e04ce24 100644 --- a/src/mistralai/client/models/messageinputcontentchunks.py +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -2,10 +2,13 @@ # @generated-id: 01025c12866a from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from typing import Union from typing_extensions import TypeAliasType @@ -17,7 +20,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ThinkChunkTypedDict, + ConversationThinkChunkTypedDict, ToolFileChunkTypedDict, ], ) @@ -25,5 +28,11 @@ MessageInputContentChunks = TypeAliasType( "MessageInputContentChunks", - Union[TextChunk, ImageURLChunk, DocumentURLChunk, ThinkChunk, ToolFileChunk], + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ConversationThinkChunk, + ToolFileChunk, + ], ) diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py index 15046d25..c948a13e 100644 --- a/src/mistralai/client/models/messageinputentry.py +++ b/src/mistralai/client/models/messageinputentry.py @@ -15,18 +15,15 @@ UNSET_SENTINEL, UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -MessageInputEntryObject = Literal["entry",] - - -MessageInputEntryType = Literal["message.input",] - - -MessageInputEntryRole = Union[ +Role = Union[ Literal[ "assistant", "user", @@ -49,10 +46,10 @@ class MessageInputEntryTypedDict(TypedDict): r"""Representation of an input message inside the conversation.""" - role: MessageInputEntryRole + role: Role content: MessageInputEntryContentTypedDict - object: NotRequired[MessageInputEntryObject] - type: NotRequired[MessageInputEntryType] + object: Literal["entry"] + type: Literal["message.input"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] @@ -62,13 +59,22 @@ class MessageInputEntryTypedDict(TypedDict): class MessageInputEntry(BaseModel): r"""Representation of an input message inside the conversation.""" - role: MessageInputEntryRole + role: Role content: MessageInputEntryContent - object: Optional[MessageInputEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[MessageInputEntryType] = "message.input" + type: 
Annotated[ + Annotated[ + Optional[Literal["message.input"]], + AfterValidator(validate_const("message.input")), + ], + pydantic.Field(alias="type"), + ] = "message.input" created_at: Optional[datetime] = None @@ -80,37 +86,33 @@ class MessageInputEntry(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "prefix", - ] - nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set( + ["object", "type", "created_at", "completed_at", "id", "prefix"] + ) + nullable_fields = set(["completed_at"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + MessageInputEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py index def7a4d2..bf455d17 100644 --- a/src/mistralai/client/models/messageoutputcontentchunks.py +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -2,10 +2,13 @@ # @generated-id: 2ed248515035 from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -18,7 +21,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ThinkChunkTypedDict, + ConversationThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -31,7 +34,7 @@ TextChunk, ImageURLChunk, DocumentURLChunk, - ThinkChunk, + ConversationThinkChunk, ToolFileChunk, ToolReferenceChunk, ], diff --git a/src/mistralai/client/models/messageoutputentry.py b/src/mistralai/client/models/messageoutputentry.py index 8752fc36..6a9c52ed 100644 --- a/src/mistralai/client/models/messageoutputentry.py +++ b/src/mistralai/client/models/messageoutputentry.py @@ -14,18 +14,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEntryObject = Literal["entry",] - - -MessageOutputEntryType = Literal["message.output",] - - -MessageOutputEntryRole = Literal["assistant",] +from typing_extensions import 
Annotated, NotRequired, TypeAliasType, TypedDict MessageOutputEntryContentTypedDict = TypeAliasType( @@ -41,70 +35,87 @@ class MessageOutputEntryTypedDict(TypedDict): content: MessageOutputEntryContentTypedDict - object: NotRequired[MessageOutputEntryObject] - type: NotRequired[MessageOutputEntryType] + object: Literal["entry"] + type: Literal["message.output"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] agent_id: NotRequired[Nullable[str]] model: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEntryRole] + id: NotRequired[str] + role: Literal["assistant"] class MessageOutputEntry(BaseModel): content: MessageOutputEntryContent - object: Optional[MessageOutputEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[MessageOutputEntryType] = "message.output" + type: Annotated[ + Annotated[ + Optional[Literal["message.output"]], + AfterValidator(validate_const("message.output")), + ], + pydantic.Field(alias="type"), + ] = "message.output" created_at: Optional[datetime] = None completed_at: OptionalNullable[datetime] = UNSET - id: Optional[str] = None - agent_id: OptionalNullable[str] = UNSET model: OptionalNullable[str] = UNSET - role: Optional[MessageOutputEntryRole] = "assistant" + id: Optional[str] = None + + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "agent_id", - "model", - "role", - ] - nullable_fields = ["completed_at", "agent_id", "model"] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "role", + ] + ) + nullable_fields = set(["completed_at", "agent_id", "model"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + MessageOutputEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py index 39c10139..d765f4fd 100644 --- a/src/mistralai/client/models/messageoutputevent.py +++ b/src/mistralai/client/models/messageoutputevent.py @@ -19,9 +19,6 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -MessageOutputEventRole = Literal["assistant",] - - MessageOutputEventContentTypedDict = TypeAliasType( "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] ) @@ -41,7 +38,7 @@ class 
MessageOutputEventTypedDict(TypedDict): content_index: NotRequired[int] model: NotRequired[Nullable[str]] agent_id: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEventRole] + role: Literal["assistant"] class MessageOutputEvent(BaseModel): @@ -49,7 +46,7 @@ class MessageOutputEvent(BaseModel): content: MessageOutputEventContent - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["message.output.delta"], AfterValidator(validate_const("message.output.delta")), @@ -67,41 +64,42 @@ class MessageOutputEvent(BaseModel): agent_id: OptionalNullable[str] = UNSET - role: Optional[MessageOutputEventRole] = "assistant" + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "created_at", - "output_index", - "content_index", - "model", - "agent_id", - "role", - ] - nullable_fields = ["model", "agent_id"] - null_default_fields = [] - + optional_fields = set( + ["created_at", "output_index", "content_index", "model", "agent_id", "role"] + ) + nullable_fields = set(["model", "agent_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + MessageOutputEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/metricout.py b/src/mistralai/client/models/metric.py similarity index 60% rename from src/mistralai/client/models/metricout.py rename to src/mistralai/client/models/metric.py index 5705c712..1413f589 100644 --- a/src/mistralai/client/models/metricout.py +++ b/src/mistralai/client/models/metric.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 92d33621dda7 +# @generated-id: c6a65acdd1a2 from __future__ import annotations from mistralai.client.types import ( @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -class MetricOutTypedDict(TypedDict): +class MetricTypedDict(TypedDict): r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" train_loss: NotRequired[Nullable[float]] @@ -21,7 +21,7 @@ class MetricOutTypedDict(TypedDict): valid_mean_token_accuracy: NotRequired[Nullable[float]] -class MetricOut(BaseModel): +class Metric(BaseModel): r"""Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" train_loss: OptionalNullable[float] = UNSET @@ -32,30 +32,25 @@ class MetricOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - null_default_fields = [] - + optional_fields = set(["train_loss", "valid_loss", "valid_mean_token_accuracy"]) + nullable_fields = set(["train_loss", "valid_loss", "valid_mean_token_accuracy"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/modelcapabilities.py b/src/mistralai/client/models/modelcapabilities.py index c329efbc..d9293ccc 100644 --- a/src/mistralai/client/models/modelcapabilities.py +++ b/src/mistralai/client/models/modelcapabilities.py @@ -2,7 +2,8 @@ # @generated-id: 64d8a422ea29 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -40,3 +41,32 @@ class ModelCapabilities(BaseModel): audio: Optional[bool] = False audio_transcription: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "completion_chat", + "function_calling", + "completion_fim", + "fine_tuning", + "vision", + "ocr", + "classification", + "moderation", + "audio", + "audio_transcription", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py index c0bacb7f..bb33d2e0 100644 --- a/src/mistralai/client/models/modelconversation.py +++ b/src/mistralai/client/models/modelconversation.py @@ -10,6 +10,7 @@ from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict from datetime import datetime +from functools import partial from mistralai.client.types import ( BaseModel, Nullable, @@ -17,7 +18,11 @@ UNSET, UNSET_SENTINEL, ) -from pydantic import Field, model_serializer +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator from 
typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -25,16 +30,36 @@ ModelConversationToolTypedDict = TypeAliasType( "ModelConversationToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) +class UnknownModelConversationTool(BaseModel): + r"""A ModelConversationTool variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_MODEL_CONVERSATION_TOOL_VARIANTS: dict[str, Any] = { + "code_interpreter": CodeInterpreterTool, + "document_library": DocumentLibraryTool, + "function": FunctionTool, + "image_generation": ImageGenerationTool, + "web_search": WebSearchTool, + "web_search_premium": WebSearchPremiumTool, +} + + ModelConversationTool = Annotated[ Union[ CodeInterpreterTool, @@ -43,14 +68,20 @@ ImageGenerationTool, WebSearchTool, WebSearchPremiumTool, + UnknownModelConversationTool, ], - Field(discriminator="TYPE"), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_MODEL_CONVERSATION_TOOL_VARIANTS, + unknown_cls=UnknownModelConversationTool, + union_name="ModelConversationTool", + ) + ), ] -ModelConversationObject = Literal["conversation",] - - class ModelConversationTypedDict(TypedDict): id: str created_at: datetime @@ -68,7 +99,7 @@ class ModelConversationTypedDict(TypedDict): r"""Description of the what the conversation is about.""" metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" - object: NotRequired[ModelConversationObject] + object: Literal["conversation"] class ModelConversation(BaseModel): @@ -98,42 +129,51 @@ class ModelConversation(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" - object: Optional[ModelConversationObject] = "conversation" + object: Annotated[ + Annotated[ + Optional[Literal["conversation"]], + AfterValidator(validate_const("conversation")), + ], + pydantic.Field(alias="object"), + ] = "conversation" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "name", - "description", - "metadata", - "object", - ] - nullable_fields = ["instructions", "name", "description", "metadata"] - null_default_fields = [] - + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "object", + ] + ) + nullable_fields = set(["instructions", "name", "description", "metadata"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in 
optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ModelConversation.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py index c122122c..5fd835f2 100644 --- a/src/mistralai/client/models/modellist.py +++ b/src/mistralai/client/models/modellist.py @@ -4,9 +4,12 @@ from __future__ import annotations from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.client.types import BaseModel -from pydantic import Field -from typing import List, Optional, Union +from functools import partial +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import BeforeValidator +from typing import Any, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -15,8 +18,33 @@ ) +class UnknownModelListData(BaseModel): + r"""A ModelListData variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_MODEL_LIST_DATA_VARIANTS: dict[str, Any] = { + "base": BaseModelCard, + "fine-tuned": FTModelCard, +} + + ModelListData = Annotated[ - Union[BaseModelCard, FTModelCard], Field(discriminator="TYPE") + Union[BaseModelCard, FTModelCard, UnknownModelListData], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_MODEL_LIST_DATA_VARIANTS, + unknown_cls=UnknownModelListData, + union_name="ModelListData", + ) + ), ] @@ -29,3 +57,19 @@ class ModelList(BaseModel): object: Optional[str] = "list" data: Optional[List[ModelListData]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "data"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/moderationobject.py b/src/mistralai/client/models/moderationobject.py index 9aa4eb15..e7ccd8f6 100644 --- a/src/mistralai/client/models/moderationobject.py +++ b/src/mistralai/client/models/moderationobject.py @@ -2,7 +2,8 @@ # @generated-id: 132faad0549a from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Dict, Optional from typing_extensions import NotRequired, TypedDict @@ -20,3 +21,19 @@ class ModerationObject(BaseModel): category_scores: Optional[Dict[str, float]] = None r"""Moderation result""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["categories", "category_scores"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrimageobject.py b/src/mistralai/client/models/ocrimageobject.py index e95b67e1..365f062b 100644 --- a/src/mistralai/client/models/ocrimageobject.py +++ 
b/src/mistralai/client/models/ocrimageobject.py @@ -54,37 +54,34 @@ class OCRImageObject(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["image_base64", "image_annotation"] - nullable_fields = [ - "top_left_x", - "top_left_y", - "bottom_right_x", - "bottom_right_y", - "image_base64", - "image_annotation", - ] - null_default_fields = [] - + optional_fields = set(["image_base64", "image_annotation"]) + nullable_fields = set( + [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/ocrpageobject.py b/src/mistralai/client/models/ocrpageobject.py index 4f4ccf43..ffc7b3b6 100644 --- a/src/mistralai/client/models/ocrpageobject.py +++ b/src/mistralai/client/models/ocrpageobject.py @@ -63,30 +63,25 @@ class OCRPageObject(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tables", "hyperlinks", "header", "footer"] - nullable_fields = ["header", "footer", "dimensions"] - null_default_fields = [] - + optional_fields = set(["tables", "hyperlinks", "header", "footer"]) + nullable_fields = set(["header", "footer", "dimensions"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/ocrrequest.py b/src/mistralai/client/models/ocrrequest.py index 18b899dd..4ad337ce 100644 --- a/src/mistralai/client/models/ocrrequest.py +++ b/src/mistralai/client/models/ocrrequest.py @@ -18,14 +18,16 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -DocumentTypedDict = TypeAliasType( - "DocumentTypedDict", +DocumentUnionTypedDict = TypeAliasType( + "DocumentUnionTypedDict", Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], ) r"""Document to run OCR on""" -Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) +DocumentUnion = TypeAliasType( + 
"DocumentUnion", Union[FileChunk, ImageURLChunk, DocumentURLChunk] +) r"""Document to run OCR on""" @@ -37,7 +39,7 @@ class OCRRequestTypedDict(TypedDict): model: Nullable[str] - document: DocumentTypedDict + document: DocumentUnionTypedDict r"""Document to run OCR on""" id: NotRequired[str] pages: NotRequired[Nullable[List[int]]] @@ -62,7 +64,7 @@ class OCRRequestTypedDict(TypedDict): class OCRRequest(BaseModel): model: Nullable[str] - document: Document + document: DocumentUnion r"""Document to run OCR on""" id: Optional[str] = None @@ -96,52 +98,51 @@ class OCRRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "id", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - "extract_header", - "extract_footer", - ] - nullable_fields = [ - "model", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - ] - null_default_fields = [] - + optional_fields = set( + [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + ) + nullable_fields = set( + [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py index 0a36e975..e63eed98 100644 --- a/src/mistralai/client/models/ocrresponse.py +++ b/src/mistralai/client/models/ocrresponse.py @@ -40,30 +40,25 @@ class OCRResponse(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["document_annotation"] - nullable_fields = ["document_annotation"] - null_default_fields = [] - + optional_fields = set(["document_annotation"]) + nullable_fields = set(["document_annotation"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) - if val is not None 
and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py index e32ad894..66bb050f 100644 --- a/src/mistralai/client/models/ocrtableobject.py +++ b/src/mistralai/client/models/ocrtableobject.py @@ -36,3 +36,9 @@ class OCRTableObject(BaseModel): format_: Annotated[Format, pydantic.Field(alias="format")] r"""Format of the table""" + + +try: + OCRTableObject.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/ocrusageinfo.py b/src/mistralai/client/models/ocrusageinfo.py index a421d850..2ec1322b 100644 --- a/src/mistralai/client/models/ocrusageinfo.py +++ b/src/mistralai/client/models/ocrusageinfo.py @@ -29,30 +29,25 @@ class OCRUsageInfo(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["doc_size_bytes"] - nullable_fields = ["doc_size_bytes"] - null_default_fields = [] - + optional_fields = set(["doc_size_bytes"]) + nullable_fields = set(["doc_size_bytes"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py index 1a115fe8..fab7907b 100644 --- a/src/mistralai/client/models/outputcontentchunks.py +++ b/src/mistralai/client/models/outputcontentchunks.py @@ -2,10 +2,13 @@ # @generated-id: 9ad9741f4975 from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -18,7 +21,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ThinkChunkTypedDict, + ConversationThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -31,7 +34,7 @@ TextChunk, ImageURLChunk, DocumentURLChunk, - ThinkChunk, + ConversationThinkChunk, ToolFileChunk, ToolReferenceChunk, ], diff --git a/src/mistralai/client/models/prediction.py b/src/mistralai/client/models/prediction.py index 52f4adf1..0c6f4182 100644 --- a/src/mistralai/client/models/prediction.py +++ b/src/mistralai/client/models/prediction.py @@ 
-2,9 +2,10 @@ # @generated-id: 1cc842a069a5 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -20,7 +21,7 @@ class PredictionTypedDict(TypedDict): class Prediction(BaseModel): r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) ], @@ -28,3 +29,25 @@ class Prediction(BaseModel): ] = "content" content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionerror.py b/src/mistralai/client/models/realtimetranscriptionerror.py index f8f2d3da..c661e461 100644 --- a/src/mistralai/client/models/realtimetranscriptionerror.py +++ b/src/mistralai/client/models/realtimetranscriptionerror.py @@ -6,9 +6,10 @@ RealtimeTranscriptionErrorDetail, RealtimeTranscriptionErrorDetailTypedDict, ) -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -22,7 +23,29 @@ class RealtimeTranscriptionErrorTypedDict(TypedDict): class RealtimeTranscriptionError(BaseModel): error: RealtimeTranscriptionErrorDetail - TYPE: Annotated[ + type: Annotated[ Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], pydantic.Field(alias="type"), ] = "error" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionError.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py b/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py new file mode 100644 index 00000000..8156a270 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 8b03cde6e115 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioAppendTypedDict(TypedDict): + audio: str + r"""Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes.""" + type: Literal["input_audio.append"] + + +class RealtimeTranscriptionInputAudioAppend(BaseModel): + audio: str + r"""Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes.""" + + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.append"]], + AfterValidator(validate_const("input_audio.append")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.append" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioAppend.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioend.py b/src/mistralai/client/models/realtimetranscriptioninputaudioend.py new file mode 100644 index 00000000..473eedb7 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioend.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c187ba1b551d + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioEndTypedDict(TypedDict): + type: Literal["input_audio.end"] + + +class RealtimeTranscriptionInputAudioEnd(BaseModel): + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.end"]], + AfterValidator(validate_const("input_audio.end")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.end" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioEnd.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py b/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py new file mode 100644 index 00000000..553d14c7 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b27b600c310e + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioFlushTypedDict(TypedDict): + type: Literal["input_audio.flush"] + + +class RealtimeTranscriptionInputAudioFlush(BaseModel): + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.flush"]], + AfterValidator(validate_const("input_audio.flush")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.flush" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioFlush.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsession.py b/src/mistralai/client/models/realtimetranscriptionsession.py index d20d0d8c..a74a457b 100644 --- a/src/mistralai/client/models/realtimetranscriptionsession.py +++ b/src/mistralai/client/models/realtimetranscriptionsession.py @@ -3,14 +3,22 @@ from __future__ import annotations from .audioformat import AudioFormat, AudioFormatTypedDict -from mistralai.client.types import BaseModel -from typing_extensions import TypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict class RealtimeTranscriptionSessionTypedDict(TypedDict): request_id: str model: str audio_format: AudioFormatTypedDict + target_streaming_delay_ms: NotRequired[Nullable[int]] class RealtimeTranscriptionSession(BaseModel): @@ -19,3 +27,30 @@ class RealtimeTranscriptionSession(BaseModel): model: str audio_format: AudioFormat + + target_streaming_delay_ms: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["target_streaming_delay_ms"]) + nullable_fields = set(["target_streaming_delay_ms"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py index c4fa5774..bb96875a 100644 --- a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py +++ b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py @@ -6,9 +6,10 @@ RealtimeTranscriptionSession, RealtimeTranscriptionSessionTypedDict, ) -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from 
pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -22,10 +23,32 @@ class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): class RealtimeTranscriptionSessionCreated(BaseModel): session: RealtimeTranscriptionSession - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["session.created"]], AfterValidator(validate_const("session.created")), ], pydantic.Field(alias="type"), ] = "session.created" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionCreated.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py index a61fb05e..fea5db4a 100644 --- a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py @@ -6,9 +6,10 @@ RealtimeTranscriptionSession, RealtimeTranscriptionSessionTypedDict, ) -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -22,10 +23,32 @@ class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): class RealtimeTranscriptionSessionUpdated(BaseModel): session: RealtimeTranscriptionSession - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["session.updated"]], AfterValidator(validate_const("session.updated")), ], pydantic.Field(alias="type"), ] = "session.updated" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionUpdated.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py b/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py new file mode 100644 index 00000000..07ad59a4 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4e1b3fd7c5a3 + +from __future__ import annotations +from .realtimetranscriptionsessionupdatepayload import ( + RealtimeTranscriptionSessionUpdatePayload, + RealtimeTranscriptionSessionUpdatePayloadTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdateMessageTypedDict(TypedDict): + session: RealtimeTranscriptionSessionUpdatePayloadTypedDict + type: Literal["session.update"] + + +class RealtimeTranscriptionSessionUpdateMessage(BaseModel): + session: RealtimeTranscriptionSessionUpdatePayload + + type: Annotated[ + Annotated[ + Optional[Literal["session.update"]], + AfterValidator(validate_const("session.update")), + ], + pydantic.Field(alias="type"), + ] = "session.update" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionUpdateMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py b/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py new file mode 100644 index 00000000..a89441e9 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7033fdb33ad4 + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class RealtimeTranscriptionSessionUpdatePayloadTypedDict(TypedDict): + audio_format: NotRequired[Nullable[AudioFormatTypedDict]] + r"""Set before sending audio. Audio format updates are rejected after audio starts.""" + target_streaming_delay_ms: NotRequired[Nullable[int]] + r"""Set before sending audio. Streaming delay updates are rejected after audio starts.""" + + +class RealtimeTranscriptionSessionUpdatePayload(BaseModel): + audio_format: OptionalNullable[AudioFormat] = UNSET + r"""Set before sending audio. Audio format updates are rejected after audio starts.""" + + target_streaming_delay_ms: OptionalNullable[int] = UNSET + r"""Set before sending audio. 
Streaming delay updates are rejected after audio starts.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["audio_format", "target_streaming_delay_ms"]) + nullable_fields = set(["audio_format", "target_streaming_delay_ms"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/referencechunk.py b/src/mistralai/client/models/referencechunk.py index 7634d8ae..e0bbae4e 100644 --- a/src/mistralai/client/models/referencechunk.py +++ b/src/mistralai/client/models/referencechunk.py @@ -2,20 +2,48 @@ # @generated-id: 921acd3a224a from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference",] +from typing_extensions import Annotated, TypedDict class ReferenceChunkTypedDict(TypedDict): reference_ids: List[int] - type: NotRequired[ReferenceChunkType] + type: Literal["reference"] class ReferenceChunk(BaseModel): reference_ids: List[int] - type: Optional[ReferenceChunkType] = "reference" + type: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/reprocessdocumentop.py b/src/mistralai/client/models/reprocessdocumentop.py deleted file mode 100644 index 48a4b72b..00000000 --- a/src/mistralai/client/models/reprocessdocumentop.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: b2913a7aa5c9 - -from __future__ import annotations -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class ReprocessDocumentRequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class ReprocessDocumentRequest(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py index ed331ff1..be38fba8 100644 --- a/src/mistralai/client/models/responsedoneevent.py +++ b/src/mistralai/client/models/responsedoneevent.py @@ -4,9 +4,10 @@ from __future__ import annotations from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -21,7 +22,7 @@ class ResponseDoneEventTypedDict(TypedDict): class ResponseDoneEvent(BaseModel): usage: ConversationUsageInfo - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["conversation.response.done"], AfterValidator(validate_const("conversation.response.done")), @@ -30,3 +31,25 @@ class ResponseDoneEvent(BaseModel): ] = "conversation.response.done" created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py index 8f196a52..fa4d0d01 100644 --- a/src/mistralai/client/models/responseerrorevent.py +++ b/src/mistralai/client/models/responseerrorevent.py @@ -3,9 +3,10 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -23,7 +24,7 @@ class ResponseErrorEvent(BaseModel): code: int - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["conversation.response.error"], AfterValidator(validate_const("conversation.response.error")), @@ -32,3 +33,25 @@ class ResponseErrorEvent(BaseModel): ] = "conversation.response.error" created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != 
UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseErrorEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/responseformat.py b/src/mistralai/client/models/responseformat.py index 409b80d6..b2971412 100644 --- a/src/mistralai/client/models/responseformat.py +++ b/src/mistralai/client/models/responseformat.py @@ -32,30 +32,25 @@ class ResponseFormat(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py index 256d2a6c..84abfcd9 100644 --- a/src/mistralai/client/models/responsestartedevent.py +++ b/src/mistralai/client/models/responsestartedevent.py @@ -3,9 +3,10 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -20,7 +21,7 @@ class ResponseStartedEventTypedDict(TypedDict): class ResponseStartedEvent(BaseModel): conversation_id: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["conversation.response.started"], AfterValidator(validate_const("conversation.response.started")), @@ -29,3 +30,25 @@ class ResponseStartedEvent(BaseModel): ] = "conversation.response.started" created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py new file mode 100644 index 00000000..cd5955c1 --- /dev/null +++ b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6fefa90ca351 + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to retrieve.""" + + +class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to retrieve.""" + + +ResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", + Union[BaseModelCardTypedDict, FTModelCardTypedDict], +) +r"""Successful Response""" + + +class UnknownResponseRetrieveModelV1ModelsModelIDGet(BaseModel): + r"""A ResponseRetrieveModelV1ModelsModelIDGet variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_RESPONSE_RETRIEVE_MODEL_V1_MODELS_MODEL_ID_GET_VARIANTS: dict[str, Any] = { + "base": BaseModelCard, + "fine-tuned": FTModelCard, +} + + +ResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[BaseModelCard, FTModelCard, UnknownResponseRetrieveModelV1ModelsModelIDGet], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_RESPONSE_RETRIEVE_MODEL_V1_MODELS_MODEL_ID_GET_VARIANTS, + unknown_cls=UnknownResponseRetrieveModelV1ModelsModelIDGet, + union_name="ResponseRetrieveModelV1ModelsModelIDGet", + ) + ), +] +r"""Successful Response""" diff --git a/src/mistralai/client/models/retrievemodelop.py b/src/mistralai/client/models/retrievemodelop.py deleted file mode 100644 index b4334e9a..00000000 --- a/src/mistralai/client/models/retrievemodelop.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: d883baa79c9e - -from __future__ import annotations -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict -from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class RetrieveModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to retrieve.""" - - -class RetrieveModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to retrieve.""" - - -ResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( - "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", - Union[BaseModelCardTypedDict, FTModelCardTypedDict], -) -r"""Successful Response""" - - -ResponseRetrieveModelV1ModelsModelIDGet = Annotated[ - Union[BaseModelCard, FTModelCard], Field(discriminator="TYPE") -] -r"""Successful Response""" diff --git a/src/mistralai/client/models/security.py b/src/mistralai/client/models/security.py index 4fa8b4b2..f3b3423e 100644 --- a/src/mistralai/client/models/security.py +++ b/src/mistralai/client/models/security.py @@ -2,8 +2,9 @@ # @generated-id: c2ca0e2a36b7 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import FieldMetadata, SecurityMetadata +from pydantic import model_serializer from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -24,3 +25,19 @@ class Security(BaseModel): ) ), ] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["ApiKey"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/sharingdelete.py b/src/mistralai/client/models/sharingdelete.py index 202732cf..33ccd7e7 100644 --- a/src/mistralai/client/models/sharingdelete.py +++ b/src/mistralai/client/models/sharingdelete.py @@ -33,30 +33,25 @@ class SharingDelete(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["org_id"] - nullable_fields = ["org_id"] - null_default_fields = [] - + optional_fields = set(["org_id"]) + nullable_fields = set(["org_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/sharingin.py 
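A rough sketch of what the open-union handling introduced in this patch (parse_open_union plus the Unknown* wrapper models such as UnknownModelListData and UnknownResponseRetrieveModelV1ModelsModelIDGet above) means for callers: payloads whose discriminator value is not in the variants map are wrapped instead of failing validation, with the raw payload preserved. The import path and the known-card `id` attribute below are assumptions based on the file layout and changelog, not part of the generated patch.

```python
# Sketch only; assumes mistralai.client.models.modellist is importable as laid out above.
from mistralai.client.models.modellist import UnknownModelListData


def describe(card) -> str:
    # Open unions surface unrecognized variants as UnknownModelListData
    # (type == "UNKNOWN", is_unknown == True) rather than raising a validation error.
    if isinstance(card, UnknownModelListData):
        return f"unrecognized model card, raw type={card.raw.get('type')!r}"
    return f"model card: {getattr(card, 'id', card)}"


# The unknown wrapper can also be built directly; it simply carries the raw payload.
future_card = UnknownModelListData(raw={"type": "some-future-card", "id": "experimental"})
print(describe(future_card))  # unrecognized model card, raw type='some-future-card'
```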
b/src/mistralai/client/models/sharingin.py index 8cc3e896..7c1a52b0 100644 --- a/src/mistralai/client/models/sharingin.py +++ b/src/mistralai/client/models/sharingin.py @@ -37,30 +37,25 @@ class SharingIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["org_id"] - nullable_fields = ["org_id"] - null_default_fields = [] - + optional_fields = set(["org_id"]) + nullable_fields = set(["org_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharingout.py index 77807154..ab3679a4 100644 --- a/src/mistralai/client/models/sharingout.py +++ b/src/mistralai/client/models/sharingout.py @@ -37,30 +37,25 @@ class SharingOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["user_id"] - nullable_fields = ["user_id", "share_with_uuid"] - null_default_fields = [] - + optional_fields = set(["user_id"]) + nullable_fields = set(["user_id", "share_with_uuid"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/startfinetuningjobop.py b/src/mistralai/client/models/startfinetuningjobop.py deleted file mode 100644 index 805a8721..00000000 --- a/src/mistralai/client/models/startfinetuningjobop.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 663886392468 - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class StartFineTuningJobRequestTypedDict(TypedDict): - job_id: str - - -class StartFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - -StartFineTuningJobResponseTypedDict = TypeAliasType( - "StartFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -StartFineTuningJobResponse = Annotated[ - Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], - Field(discriminator="JOB_TYPE"), -] -r"""OK""" diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py index 352eca76..2602cd2d 100644 --- a/src/mistralai/client/models/systemmessage.py +++ b/src/mistralai/client/models/systemmessage.py @@ -33,7 +33,13 @@ class SystemMessageTypedDict(TypedDict): class SystemMessage(BaseModel): content: SystemMessageContent - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["system"], AfterValidator(validate_const("system"))], pydantic.Field(alias="role"), ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/textchunk.py b/src/mistralai/client/models/textchunk.py index c0584234..ac9f3137 100644 --- a/src/mistralai/client/models/textchunk.py +++ b/src/mistralai/client/models/textchunk.py @@ -2,20 +2,46 @@ # @generated-id: 9c96fb86a9ab from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TextChunkType = Literal["text",] +from typing_extensions import Annotated, TypedDict class TextChunkTypedDict(TypedDict): text: str - type: NotRequired[TextChunkType] + type: Literal["text"] class TextChunk(BaseModel): text: str - type: Optional[TextChunkType] = "text" + type: Annotated[ + Annotated[Optional[Literal["text"]], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/thinkchunk.py b/src/mistralai/client/models/thinkchunk.py index a999f5d7..5995e601 100644 --- a/src/mistralai/client/models/thinkchunk.py +++ b/src/mistralai/client/models/thinkchunk.py @@ -4,33 +4,61 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk 
import TextChunk, TextChunkTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ThinkingTypedDict = TypeAliasType( - "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +ThinkChunkThinkingTypedDict = TypeAliasType( + "ThinkChunkThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] ) -Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) - - -ThinkChunkType = Literal["thinking",] +ThinkChunkThinking = TypeAliasType( + "ThinkChunkThinking", Union[ReferenceChunk, TextChunk] +) class ThinkChunkTypedDict(TypedDict): - thinking: List[ThinkingTypedDict] + thinking: List[ThinkChunkThinkingTypedDict] + type: Literal["thinking"] closed: NotRequired[bool] r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] class ThinkChunk(BaseModel): - thinking: List[Thinking] + thinking: List[ThinkChunkThinking] + + type: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: Optional[ThinkChunkType] = "thinking" + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/tool.py b/src/mistralai/client/models/tool.py index a46d31f1..2b9965e5 100644 --- a/src/mistralai/client/models/tool.py +++ b/src/mistralai/client/models/tool.py @@ -4,7 +4,8 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -18,3 +19,19 @@ class Tool(BaseModel): function: Function type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolcall.py b/src/mistralai/client/models/toolcall.py index 4a05bbd0..181cec33 100644 --- a/src/mistralai/client/models/toolcall.py +++ b/src/mistralai/client/models/toolcall.py @@ -4,7 +4,8 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL 
+from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -24,3 +25,19 @@ class ToolCall(BaseModel): type: Optional[ToolTypes] = None index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolcallconfirmation.py b/src/mistralai/client/models/toolcallconfirmation.py new file mode 100644 index 00000000..fd6eca50 --- /dev/null +++ b/src/mistralai/client/models/toolcallconfirmation.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f2e953cfb4fe + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal +from typing_extensions import TypedDict + + +Confirmation = Literal[ + "allow", + "deny", +] + + +class ToolCallConfirmationTypedDict(TypedDict): + tool_call_id: str + confirmation: Confirmation + + +class ToolCallConfirmation(BaseModel): + tool_call_id: str + + confirmation: Confirmation diff --git a/src/mistralai/client/models/toolchoice.py b/src/mistralai/client/models/toolchoice.py index aa2016fb..cb787df1 100644 --- a/src/mistralai/client/models/toolchoice.py +++ b/src/mistralai/client/models/toolchoice.py @@ -4,7 +4,8 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -24,3 +25,19 @@ class ToolChoice(BaseModel): r"""this restriction of `Function` is used to select a specific function to call""" type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolconfiguration.py b/src/mistralai/client/models/toolconfiguration.py new file mode 100644 index 00000000..b903c8b6 --- /dev/null +++ b/src/mistralai/client/models/toolconfiguration.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: faec24b75066 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ToolConfigurationTypedDict(TypedDict): + exclude: NotRequired[Nullable[List[str]]] + include: NotRequired[Nullable[List[str]]] + requires_confirmation: NotRequired[Nullable[List[str]]] + + +class ToolConfiguration(BaseModel): + exclude: OptionalNullable[List[str]] = UNSET + + include: OptionalNullable[List[str]] = UNSET + + requires_confirmation: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["exclude", "include", "requires_confirmation"]) + nullable_fields = set(["exclude", "include", "requires_confirmation"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py index 384ec240..5a977ca6 100644 --- a/src/mistralai/client/models/toolexecutiondeltaevent.py +++ b/src/mistralai/client/models/toolexecutiondeltaevent.py @@ -4,9 +4,10 @@ from __future__ import annotations from .builtinconnectors import BuiltInConnectors from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -38,7 +39,7 @@ class ToolExecutionDeltaEvent(BaseModel): arguments: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["tool.execution.delta"], AfterValidator(validate_const("tool.execution.delta")), @@ -49,3 +50,25 @@ class ToolExecutionDeltaEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ToolExecutionDeltaEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py index 56f28899..1c9b0ec9 100644 --- a/src/mistralai/client/models/toolexecutiondoneevent.py +++ b/src/mistralai/client/models/toolexecutiondoneevent.py @@ -4,9 +4,10 @@ from __future__ import annotations from .builtinconnectors import BuiltInConnectors from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic 
import model_serializer from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -36,7 +37,7 @@ class ToolExecutionDoneEvent(BaseModel): name: ToolExecutionDoneEventName - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["tool.execution.done"], AfterValidator(validate_const("tool.execution.done")), @@ -49,3 +50,25 @@ class ToolExecutionDoneEvent(BaseModel): output_index: Optional[int] = 0 info: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index", "info"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ToolExecutionDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py index 158cbf06..0d6f2a13 100644 --- a/src/mistralai/client/models/toolexecutionentry.py +++ b/src/mistralai/client/models/toolexecutionentry.py @@ -11,15 +11,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionEntryObject = Literal["entry",] - - -ToolExecutionEntryType = Literal["tool.execution",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolExecutionEntryNameTypedDict = TypeAliasType( @@ -35,10 +32,12 @@ class ToolExecutionEntryTypedDict(TypedDict): name: ToolExecutionEntryNameTypedDict arguments: str - object: NotRequired[ToolExecutionEntryObject] - type: NotRequired[ToolExecutionEntryType] + object: Literal["entry"] + type: Literal["tool.execution"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] id: NotRequired[str] info: NotRequired[Dict[str, Any]] @@ -48,44 +47,69 @@ class ToolExecutionEntry(BaseModel): arguments: str - object: Optional[ToolExecutionEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[ToolExecutionEntryType] = "tool.execution" + type: Annotated[ + Annotated[ + Optional[Literal["tool.execution"]], + AfterValidator(validate_const("tool.execution")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution" created_at: Optional[datetime] = None completed_at: OptionalNullable[datetime] = UNSET + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + id: Optional[str] = None info: Optional[Dict[str, Any]] = None @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"] - nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "info", + ] + ) + nullable_fields = set(["completed_at", "agent_id", "model"]) serialized = handler(self) - m = {} for n, f 
in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolExecutionEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py index 15918669..21e5bfa8 100644 --- a/src/mistralai/client/models/toolexecutionstartedevent.py +++ b/src/mistralai/client/models/toolexecutionstartedevent.py @@ -4,9 +4,16 @@ from __future__ import annotations from .builtinconnectors import BuiltInConnectors from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -29,6 +36,8 @@ class ToolExecutionStartedEventTypedDict(TypedDict): type: Literal["tool.execution.started"] created_at: NotRequired[datetime] output_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] class ToolExecutionStartedEvent(BaseModel): @@ -38,7 +47,7 @@ class ToolExecutionStartedEvent(BaseModel): arguments: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["tool.execution.started"], AfterValidator(validate_const("tool.execution.started")), @@ -49,3 +58,38 @@ class ToolExecutionStartedEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index", "model", "agent_id"]) + nullable_fields = set(["model", "agent_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolExecutionStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolfilechunk.py b/src/mistralai/client/models/toolfilechunk.py index 6eebd562..0708b3ff 100644 --- a/src/mistralai/client/models/toolfilechunk.py +++ b/src/mistralai/client/models/toolfilechunk.py @@ -10,12 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from 
pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolFileChunkType = Literal["tool_file",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolFileChunkToolTypedDict = TypeAliasType( @@ -29,7 +29,7 @@ class ToolFileChunkTypedDict(TypedDict): tool: ToolFileChunkToolTypedDict file_id: str - type: NotRequired[ToolFileChunkType] + type: Literal["tool_file"] file_name: NotRequired[Nullable[str]] file_type: NotRequired[Nullable[str]] @@ -39,7 +39,12 @@ class ToolFileChunk(BaseModel): file_id: str - type: Optional[ToolFileChunkType] = "tool_file" + type: Annotated[ + Annotated[ + Optional[Literal["tool_file"]], AfterValidator(validate_const("tool_file")) + ], + pydantic.Field(alias="type"), + ] = "tool_file" file_name: OptionalNullable[str] = UNSET @@ -47,30 +52,31 @@ class ToolFileChunk(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "file_name", "file_type"] - nullable_fields = ["file_name", "file_type"] - null_default_fields = [] - + optional_fields = set(["type", "file_name", "file_type"]) + nullable_fields = set(["file_name", "file_type"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolFileChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolmessage.py b/src/mistralai/client/models/toolmessage.py index b3e8ffd9..05a0ee63 100644 --- a/src/mistralai/client/models/toolmessage.py +++ b/src/mistralai/client/models/toolmessage.py @@ -28,49 +28,50 @@ class ToolMessageTypedDict(TypedDict): content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - role: Literal["tool"] class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] - tool_call_id: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], pydantic.Field(alias="role"), ] = "tool" + tool_call_id: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolreferencechunk.py b/src/mistralai/client/models/toolreferencechunk.py index 3c76c8c2..95454fe8 100644 --- a/src/mistralai/client/models/toolreferencechunk.py +++ b/src/mistralai/client/models/toolreferencechunk.py @@ -10,12 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolReferenceChunkType = Literal["tool_reference",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolReferenceChunkToolTypedDict = TypeAliasType( @@ -31,7 +31,7 @@ class ToolReferenceChunkTypedDict(TypedDict): tool: ToolReferenceChunkToolTypedDict title: str - type: NotRequired[ToolReferenceChunkType] + type: Literal["tool_reference"] url: NotRequired[Nullable[str]] favicon: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] @@ -42,7 +42,13 @@ class ToolReferenceChunk(BaseModel): title: str - type: Optional[ToolReferenceChunkType] = "tool_reference" + type: Annotated[ + Annotated[ + Optional[Literal["tool_reference"]], + AfterValidator(validate_const("tool_reference")), + ], + pydantic.Field(alias="type"), + ] = "tool_reference" url: OptionalNullable[str] = UNSET @@ -52,30 +58,31 @@ class ToolReferenceChunk(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "url", "favicon", "description"] - nullable_fields = ["url", "favicon", "description"] - null_default_fields = [] - + optional_fields = set(["type", "url", "favicon", "description"]) + nullable_fields = set(["url", "favicon", "description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/trainingfile.py b/src/mistralai/client/models/trainingfile.py index 1f710ff8..2faeda8b 100644 --- a/src/mistralai/client/models/trainingfile.py 
+++ b/src/mistralai/client/models/trainingfile.py @@ -2,7 +2,8 @@ # @generated-id: 2edf9bce227d from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -16,3 +17,19 @@ class TrainingFile(BaseModel): file_id: str weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["weight"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/transcriptionresponse.py b/src/mistralai/client/models/transcriptionresponse.py index 786863ec..70315463 100644 --- a/src/mistralai/client/models/transcriptionresponse.py +++ b/src/mistralai/client/models/transcriptionresponse.py @@ -48,32 +48,27 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["segments"] - nullable_fields = ["language"] - null_default_fields = [] - + optional_fields = set(["segments"]) + nullable_fields = set(["language"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v diff --git a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py index c78bec30..b87bfc2f 100644 --- a/src/mistralai/client/models/transcriptionsegmentchunk.py +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -9,22 +9,21 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionSegmentChunkType = Literal["transcription_segment",] +from typing_extensions import Annotated, NotRequired, TypedDict class TranscriptionSegmentChunkTypedDict(TypedDict): text: str start: float end: float + type: Literal["transcription_segment"] score: NotRequired[Nullable[float]] speaker_id: NotRequired[Nullable[str]] - type: NotRequired[TranscriptionSegmentChunkType] class TranscriptionSegmentChunk(BaseModel): @@ -39,12 +38,18 @@ class TranscriptionSegmentChunk(BaseModel): end: float + type: Annotated[ + Annotated[ + Optional[Literal["transcription_segment"]], + AfterValidator(validate_const("transcription_segment")), + ], + pydantic.Field(alias="type"), + ] = "transcription_segment" + score: 
OptionalNullable[float] = UNSET speaker_id: OptionalNullable[str] = UNSET - type: Optional[TranscriptionSegmentChunkType] = "transcription_segment" - @property def additional_properties(self): return self.__pydantic_extra__ @@ -55,33 +60,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["score", "speaker_id", "type"] - nullable_fields = ["score", "speaker_id"] - null_default_fields = [] - + optional_fields = set(["type", "score", "speaker_id"]) + nullable_fields = set(["score", "speaker_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v return m + + +try: + TranscriptionSegmentChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py index b5740b3b..e3c50169 100644 --- a/src/mistralai/client/models/transcriptionstreamdone.py +++ b/src/mistralai/client/models/transcriptionstreamdone.py @@ -41,7 +41,7 @@ class TranscriptionStreamDone(BaseModel): segments: Optional[List[TranscriptionSegmentChunk]] = None - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["transcription.done"], AfterValidator(validate_const("transcription.done")), @@ -59,33 +59,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["segments"] - nullable_fields = ["language"] - null_default_fields = [] - + optional_fields = set(["segments"]) + nullable_fields = set(["language"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v return m + + +try: + TranscriptionStreamDone.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py index 17161a17..073fd99a 100644 --- a/src/mistralai/client/models/transcriptionstreamevents.py +++ 
b/src/mistralai/client/models/transcriptionstreamevents.py @@ -19,9 +19,12 @@ TranscriptionStreamTextDelta, TranscriptionStreamTextDeltaTypedDict, ) +from functools import partial from mistralai.client.types import BaseModel -from pydantic import Field -from typing import Union +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -36,14 +39,41 @@ ) +class UnknownTranscriptionStreamEventsData(BaseModel): + r"""A TranscriptionStreamEventsData variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_TRANSCRIPTION_STREAM_EVENTS_DATA_VARIANTS: dict[str, Any] = { + "transcription.done": TranscriptionStreamDone, + "transcription.language": TranscriptionStreamLanguage, + "transcription.segment": TranscriptionStreamSegmentDelta, + "transcription.text.delta": TranscriptionStreamTextDelta, +} + + TranscriptionStreamEventsData = Annotated[ Union[ TranscriptionStreamDone, TranscriptionStreamLanguage, TranscriptionStreamSegmentDelta, TranscriptionStreamTextDelta, + UnknownTranscriptionStreamEventsData, ], - Field(discriminator="TYPE"), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_TRANSCRIPTION_STREAM_EVENTS_DATA_VARIANTS, + unknown_cls=UnknownTranscriptionStreamEventsData, + union_name="TranscriptionStreamEventsData", + ) + ), ] diff --git a/src/mistralai/client/models/transcriptionstreamlanguage.py b/src/mistralai/client/models/transcriptionstreamlanguage.py index 67b3e979..b6c61906 100644 --- a/src/mistralai/client/models/transcriptionstreamlanguage.py +++ b/src/mistralai/client/models/transcriptionstreamlanguage.py @@ -24,7 +24,7 @@ class TranscriptionStreamLanguage(BaseModel): audio_language: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["transcription.language"], AfterValidator(validate_const("transcription.language")), @@ -39,3 +39,9 @@ def additional_properties(self): @additional_properties.setter def additional_properties(self, value): self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + +try: + TranscriptionStreamLanguage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py index 8db5e736..32ef8f9b 100644 --- a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -21,8 +21,8 @@ class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): text: str start: float end: float - speaker_id: NotRequired[Nullable[str]] type: Literal["transcription.segment"] + speaker_id: NotRequired[Nullable[str]] class TranscriptionStreamSegmentDelta(BaseModel): @@ -37,9 +37,7 @@ class TranscriptionStreamSegmentDelta(BaseModel): end: float - speaker_id: OptionalNullable[str] = UNSET - - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["transcription.segment"], AfterValidator(validate_const("transcription.segment")), @@ -47,6 +45,8 @@ class TranscriptionStreamSegmentDelta(BaseModel): pydantic.Field(alias="type"), ] = "transcription.segment" + speaker_id: OptionalNullable[str] = UNSET + @property def additional_properties(self): return self.__pydantic_extra__ @@ -57,33 +57,34 
@@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["speaker_id"] - nullable_fields = ["speaker_id"] - null_default_fields = [] - + optional_fields = set(["speaker_id"]) + nullable_fields = set(["speaker_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v return m + + +try: + TranscriptionStreamSegmentDelta.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py b/src/mistralai/client/models/transcriptionstreamtextdelta.py index 49338a08..42f0ffb7 100644 --- a/src/mistralai/client/models/transcriptionstreamtextdelta.py +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -24,7 +24,7 @@ class TranscriptionStreamTextDelta(BaseModel): text: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["transcription.text.delta"], AfterValidator(validate_const("transcription.text.delta")), @@ -39,3 +39,9 @@ def additional_properties(self): @additional_properties.setter def additional_properties(self, value): self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + +try: + TranscriptionStreamTextDelta.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/unarchiveftmodelout.py b/src/mistralai/client/models/unarchiveftmodelout.py deleted file mode 100644 index 0249a69e..00000000 --- a/src/mistralai/client/models/unarchiveftmodelout.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 9dbc3bfb71ed - -from __future__ import annotations -from mistralai.client.types import BaseModel -from mistralai.client.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class UnarchiveFTModelOutTypedDict(TypedDict): - id: str - object: Literal["model"] - archived: NotRequired[bool] - - -class UnarchiveFTModelOut(BaseModel): - id: str - - OBJECT: Annotated[ - Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], - pydantic.Field(alias="object"), - ] = "model" - - archived: Optional[bool] = False diff --git a/src/mistralai/client/models/unarchivemodelresponse.py b/src/mistralai/client/models/unarchivemodelresponse.py new file mode 100644 index 00000000..5c75d30e --- /dev/null +++ b/src/mistralai/client/models/unarchivemodelresponse.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 22e2ccbb0c80 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class UnarchiveModelResponseTypedDict(TypedDict): + id: str + object: Literal["model"] + archived: NotRequired[bool] + + +class UnarchiveModelResponse(BaseModel): + id: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + archived: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "archived"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + UnarchiveModelResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agentupdaterequest.py b/src/mistralai/client/models/updateagentrequest.py similarity index 66% rename from src/mistralai/client/models/agentupdaterequest.py rename to src/mistralai/client/models/updateagentrequest.py index 96e209d4..b751ff74 100644 --- a/src/mistralai/client/models/agentupdaterequest.py +++ b/src/mistralai/client/models/updateagentrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 2d5a3a437819 +# @generated-id: 914b4b2be67a from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -21,20 +21,20 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentUpdateRequestToolTypedDict = TypeAliasType( - "AgentUpdateRequestToolTypedDict", +UpdateAgentRequestToolTypedDict = TypeAliasType( + "UpdateAgentRequestToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) -AgentUpdateRequestTool = Annotated[ +UpdateAgentRequestTool = Annotated[ Union[ CodeInterpreterTool, DocumentLibraryTool, @@ -43,14 +43,14 @@ WebSearchTool, WebSearchPremiumTool, ], - Field(discriminator="TYPE"), + Field(discriminator="type"), ] -class AgentUpdateRequestTypedDict(TypedDict): +class UpdateAgentRequestTypedDict(TypedDict): instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentUpdateRequestToolTypedDict]] + tools: NotRequired[List[UpdateAgentRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -63,11 +63,11 @@ class AgentUpdateRequestTypedDict(TypedDict): version_message: NotRequired[Nullable[str]] -class AgentUpdateRequest(BaseModel): +class UpdateAgentRequest(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentUpdateRequestTool]] = None + tools: 
Optional[List[UpdateAgentRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None @@ -89,50 +89,49 @@ class AgentUpdateRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "model", - "name", - "description", - "handoffs", - "deployment_chat", - "metadata", - "version_message", - ] - nullable_fields = [ - "instructions", - "model", - "name", - "description", - "handoffs", - "deployment_chat", - "metadata", - "version_message", - ] - null_default_fields = [] - + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + "version_message", + ] + ) + nullable_fields = set( + [ + "instructions", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + "version_message", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/documentupdatein.py b/src/mistralai/client/models/updatedocumentrequest.py similarity index 60% rename from src/mistralai/client/models/documentupdatein.py rename to src/mistralai/client/models/updatedocumentrequest.py index 669554de..61e69655 100644 --- a/src/mistralai/client/models/documentupdatein.py +++ b/src/mistralai/client/models/updatedocumentrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: d19c1b26a875 +# @generated-id: a8cfda07d337 from __future__ import annotations from datetime import datetime @@ -31,42 +31,37 @@ ) -class DocumentUpdateInTypedDict(TypedDict): +class UpdateDocumentRequestTypedDict(TypedDict): name: NotRequired[Nullable[str]] attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] -class DocumentUpdateIn(BaseModel): +class UpdateDocumentRequest(BaseModel): name: OptionalNullable[str] = UNSET attributes: OptionalNullable[Dict[str, Attributes]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "attributes"] - nullable_fields = ["name", "attributes"] - null_default_fields = [] - + optional_fields = set(["name", "attributes"]) + nullable_fields = set(["name", "attributes"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/updateftmodelin.py b/src/mistralai/client/models/updateftmodelin.py deleted file mode 100644 index 4ac5a8a2..00000000 --- a/src/mistralai/client/models/updateftmodelin.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 39e2d678e651 - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class UpdateFTModelInTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class UpdateFTModelIn(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/updatelibraryrequest.py b/src/mistralai/client/models/updatelibraryrequest.py new file mode 100644 index 00000000..91cbf2a1 --- /dev/null +++ b/src/mistralai/client/models/updatelibraryrequest.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 51bc63885337 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateLibraryRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateLibraryRequest(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "description"]) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/updatemodelop.py b/src/mistralai/client/models/updatemodelop.py deleted file mode 100644 index 023be979..00000000 --- a/src/mistralai/client/models/updatemodelop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ba149ecfe03e - -from __future__ import annotations -from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict -from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict -from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class UpdateModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to update.""" - update_ft_model_in: UpdateFTModelInTypedDict - - -class UpdateModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to update.""" - - update_ft_model_in: Annotated[ - UpdateFTModelIn, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] - - -UpdateModelResponseTypedDict = TypeAliasType( - "UpdateModelResponseTypedDict", - Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], -) -r"""OK""" - - -UpdateModelResponse = Annotated[ - Union[ClassifierFTModelOut, CompletionFTModelOut], Field(discriminator="MODEL_TYPE") -] -r"""OK""" diff --git a/src/mistralai/client/models/updatemodelrequest.py b/src/mistralai/client/models/updatemodelrequest.py new file mode 100644 index 00000000..f685cfcc --- /dev/null +++ b/src/mistralai/client/models/updatemodelrequest.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fe649967751e + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateModelRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateModelRequest(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "description"]) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/usageinfo.py b/src/mistralai/client/models/usageinfo.py index e78f92e7..31cbf07e 100644 --- a/src/mistralai/client/models/usageinfo.py +++ b/src/mistralai/client/models/usageinfo.py @@ -46,37 +46,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "prompt_audio_seconds", - ] - nullable_fields = ["prompt_audio_seconds"] - null_default_fields = [] - + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = 
set(["prompt_audio_seconds"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v diff --git a/src/mistralai/client/models/usermessage.py b/src/mistralai/client/models/usermessage.py index 25ccdf80..63e76792 100644 --- a/src/mistralai/client/models/usermessage.py +++ b/src/mistralai/client/models/usermessage.py @@ -28,37 +28,27 @@ class UserMessageTypedDict(TypedDict): class UserMessage(BaseModel): content: Nullable[UserMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["user"], AfterValidator(validate_const("user"))], pydantic.Field(alias="role"), ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["content"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py index c5db4a6d..f0df2c77 100644 --- a/src/mistralai/client/models/wandbintegration.py +++ b/src/mistralai/client/models/wandbintegration.py @@ -35,7 +35,7 @@ class WandbIntegration(BaseModel): api_key: str r"""The WandB API key to use for authentication.""" - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], pydantic.Field(alias="type"), ] = "wandb" @@ -47,30 +47,31 @@ class WandbIntegration(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "run_name"] - nullable_fields = ["name", "run_name"] - null_default_fields = [] - + optional_fields = set(["name", "run_name"]) + nullable_fields = set(["name", "run_name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + WandbIntegration.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/wandbintegrationout.py b/src/mistralai/client/models/wandbintegrationresult.py similarity index 65% rename from src/mistralai/client/models/wandbintegrationout.py rename to src/mistralai/client/models/wandbintegrationresult.py index d0a09bf4..575cbd42 100644 --- a/src/mistralai/client/models/wandbintegrationout.py +++ b/src/mistralai/client/models/wandbintegrationresult.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 6b103d74195c +# @generated-id: 8787b4ad5458 from __future__ import annotations from mistralai.client.types import ( @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class WandbIntegrationOutTypedDict(TypedDict): +class WandbIntegrationResultTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" type: Literal["wandb"] @@ -27,11 +27,11 @@ class WandbIntegrationOutTypedDict(TypedDict): url: NotRequired[Nullable[str]] -class WandbIntegrationOut(BaseModel): +class WandbIntegrationResult(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], pydantic.Field(alias="type"), ] = "wandb" @@ -45,30 +45,31 @@ class WandbIntegrationOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "run_name", "url"] - nullable_fields = ["name", "run_name", "url"] - null_default_fields = [] - + optional_fields = set(["name", "run_name", "url"]) + nullable_fields = set(["name", "run_name", "url"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + WandbIntegrationResult.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py index 9588ab1d..00d4a4b4 100644 --- a/src/mistralai/client/models/websearchpremiumtool.py +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -2,23 +2,65 @@ # @generated-id: bfe88af887e3 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + 
OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class WebSearchPremiumToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["web_search_premium"] class WebSearchPremiumTool(BaseModel): - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[ Literal["web_search_premium"], AfterValidator(validate_const("web_search_premium")), ], pydantic.Field(alias="type"), ] = "web_search_premium" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + WebSearchPremiumTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py index 27502909..6871080f 100644 --- a/src/mistralai/client/models/websearchtool.py +++ b/src/mistralai/client/models/websearchtool.py @@ -2,20 +2,62 @@ # @generated-id: 26b0903423e5 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class WebSearchToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["web_search"] class WebSearchTool(BaseModel): - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[Literal["web_search"], AfterValidator(validate_const("web_search"))], pydantic.Field(alias="type"), ] = "web_search" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + WebSearchTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py index 05b33ac7..a287c413 100644 --- 
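For reference, the serializer rule these regenerated models share can be read as: drop a field only when it was never set; keep an explicit null on a nullable field the caller actually set. A minimal usage sketch, assuming WebSearchTool is re-exported from mistralai.client.models and that pydantic's model_dump routes through the wrap serializer shown above:

from mistralai.client.models import WebSearchTool

tool = WebSearchTool()
print(tool.model_dump(by_alias=True))
# {'type': 'web_search'}  -- the unset optional field is omitted

tool = WebSearchTool(tool_configuration=None)
print(tool.model_dump(by_alias=True))
# {'tool_configuration': None, 'type': 'web_search'}  -- an explicit null survives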
a/src/mistralai/client/models_.py +++ b/src/mistralai/client/models_.py @@ -2,7 +2,7 @@ # @generated-id: 1d277958a843 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env @@ -68,7 +68,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListModels", + operation_id="list_models_v1_models_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -83,12 +83,12 @@ def list( return unmarshal_json_response(models.ModelList, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -145,7 +145,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListModels", + operation_id="list_models_v1_models_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -160,12 +160,12 @@ async def list_async( return unmarshal_json_response(models.ModelList, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def retrieve( self, @@ -196,7 +196,7 @@ def retrieve( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveModelRequest( + request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, ) @@ -229,7 +229,7 @@ def retrieve( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RetrieveModel", + operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -247,17 +247,17 @@ def retrieve( ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def retrieve_async( self, @@ -288,7 +288,7 @@ async def retrieve_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveModelRequest( + request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, ) @@ -321,7 +321,7 @@ async def retrieve_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RetrieveModel", + operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -339,17 +339,17 @@ async def retrieve_async( ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -380,7 +380,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteModelRequest( + request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, ) @@ -413,7 +413,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteModel", + operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -429,17 +429,17 @@ def delete( return unmarshal_json_response(models.DeleteModelOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -470,7 +470,7 @@ 
async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteModelRequest( + request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, ) @@ -503,7 +503,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteModel", + operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -519,17 +519,17 @@ async def delete_async( return unmarshal_json_response(models.DeleteModelOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -541,7 +541,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UpdateModelResponse: + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: r"""Update Fine Tuned Model Update a model name or description. 
@@ -564,9 +564,9 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateModelRequest( + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( + update_model_request=models.UpdateModelRequest( name=name, description=description, ), @@ -586,7 +586,11 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + request.update_model_request, + False, + False, + "json", + models.UpdateModelRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -604,7 +608,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateModel", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -616,15 +620,17 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UpdateModelResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -636,7 +642,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UpdateModelResponse: + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: r"""Update Fine Tuned Model Update a model name or description. 
@@ -659,9 +665,9 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateModelRequest( + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( + update_model_request=models.UpdateModelRequest( name=name, description=description, ), @@ -681,7 +687,11 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + request.update_model_request, + False, + False, + "json", + models.UpdateModelRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -699,7 +709,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateModel", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -711,15 +721,17 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UpdateModelResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def archive( self, @@ -729,7 +741,7 @@ def archive( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: + ) -> models.ArchiveModelResponse: r"""Archive Fine Tuned Model Archive a fine-tuned model. 
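A hedged usage sketch of the renamed fine-tuned model update flow, assuming the keyword parameters shown above and that the Mistral client class is exported from the package root; the model id is a placeholder:

from mistralai.client import Mistral

client = Mistral(api_key="YOUR_API_KEY")
res = client.models.update(
    model_id="ft:open-mistral-7b:example",  # placeholder fine-tuned model id
    name="renamed-model",
    description="updated through the SDK",
)
print(res)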
@@ -750,7 +762,7 @@ def archive( else: base_url = self._get_url(base_url, url_variables) - request = models.ArchiveModelRequest( + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, ) @@ -783,7 +795,7 @@ def archive( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ArchiveModel", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -795,15 +807,15 @@ def archive( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ArchiveFTModelOut, http_res) + return unmarshal_json_response(models.ArchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def archive_async( self, @@ -813,7 +825,7 @@ async def archive_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: + ) -> models.ArchiveModelResponse: r"""Archive Fine Tuned Model Archive a fine-tuned model. @@ -834,7 +846,7 @@ async def archive_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ArchiveModelRequest( + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, ) @@ -867,7 +879,7 @@ async def archive_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ArchiveModel", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -879,15 +891,15 @@ async def archive_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ArchiveFTModelOut, http_res) + return unmarshal_json_response(models.ArchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def unarchive( self, @@ -897,7 +909,7 @@ def unarchive( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: + ) -> models.UnarchiveModelResponse: r"""Unarchive Fine Tuned Model Un-archive a fine-tuned model. 
@@ -918,7 +930,7 @@ def unarchive( else: base_url = self._get_url(base_url, url_variables) - request = models.UnarchiveModelRequest( + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, ) @@ -951,7 +963,7 @@ def unarchive( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UnarchiveModel", + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -963,15 +975,15 @@ def unarchive( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) + return unmarshal_json_response(models.UnarchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def unarchive_async( self, @@ -981,7 +993,7 @@ async def unarchive_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: + ) -> models.UnarchiveModelResponse: r"""Unarchive Fine Tuned Model Un-archive a fine-tuned model. @@ -1002,7 +1014,7 @@ async def unarchive_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UnarchiveModelRequest( + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, ) @@ -1035,7 +1047,7 @@ async def unarchive_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UnarchiveModel", + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1047,12 +1059,12 @@ async def unarchive_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) + return unmarshal_json_response(models.UnarchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/ocr.py b/src/mistralai/client/ocr.py index 2aa38229..a46119d1 100644 --- a/src/mistralai/client/ocr.py +++ b/src/mistralai/client/ocr.py @@ -2,12 +2,8 @@ # @generated-id: 2f804a12fc62 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils 
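Because error classes now come from mistralai.client.errors rather than mistralai.client.models, caller-side handling would look roughly like this sketch (the Mistral export from the package root is an assumption):

from mistralai.client import Mistral, errors

client = Mistral(api_key="YOUR_API_KEY")
try:
    model = client.models.retrieve(model_id="mistral-small-latest")
except errors.HTTPValidationError as exc:
    print("validation error:", exc)   # raised for 422 responses
except errors.SDKError as exc:
    print("api error:", exc)          # raised for other 4XX/5XX responses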
from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - ocrrequest as models_ocrrequest, - responseformat as models_responseformat, -) from mistralai.client.types import Nullable, OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -21,28 +17,20 @@ def process( self, *, model: Nullable[str], - document: Union[ - models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict - ], + document: Union[models.DocumentUnion, models.DocumentUnionTypedDict], id: Optional[str] = None, pages: OptionalNullable[List[int]] = UNSET, include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, bbox_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, document_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, document_annotation_prompt: OptionalNullable[str] = UNSET, - table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -83,7 +71,7 @@ def process( request = models.OCRRequest( model=model, id=id, - document=utils.get_pydantic_model(document, models.Document), + document=utils.get_pydantic_model(document, models.DocumentUnion), pages=pages, include_image_base64=include_image_base64, image_limit=image_limit, @@ -148,44 +136,36 @@ def process( return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def process_async( self, *, model: Nullable[str], - document: Union[ - models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict - ], + document: Union[models.DocumentUnion, models.DocumentUnionTypedDict], id: Optional[str] = None, pages: OptionalNullable[List[int]] = UNSET, include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, bbox_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, 
document_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, document_annotation_prompt: OptionalNullable[str] = UNSET, - table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -226,7 +206,7 @@ async def process_async( request = models.OCRRequest( model=model, id=id, - document=utils.get_pydantic_model(document, models.Document), + document=utils.get_pydantic_model(document, models.DocumentUnion), pages=pages, include_image_base64=include_image_base64, image_limit=image_limit, @@ -291,14 +271,14 @@ async def process_async( return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py index b1ab5493..80bf25a7 100644 --- a/src/mistralai/client/sdk.py +++ b/src/mistralai/client/sdk.py @@ -8,7 +8,7 @@ from .utils.retries import RetryConfig import httpx import importlib -from mistralai.client import models, utils +from mistralai.client import models as models_, utils from mistralai.client._hooks import SDKHooks from mistralai.client.types import OptionalNullable, UNSET import sys @@ -31,10 +31,7 @@ class Mistral(BaseSDK): - r"""Mistral AI API: Dora OpenAPI schema - - Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. - """ + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
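A usage sketch for the OCR endpoint after the Document -> DocumentUnion rename; the document-url chunk shape below follows the public OCR API but is an assumption here, as is the model name:

from mistralai.client import Mistral

client = Mistral(api_key="YOUR_API_KEY")
ocr_response = client.ocr.process(
    model="mistral-ocr-latest",  # assumed model name
    document={
        "type": "document_url",          # assumed discriminator value
        "document_url": "https://example.com/sample.pdf",
    },
)
print(ocr_response)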
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" models: "Models" r"""Model Management API""" @@ -118,9 +115,9 @@ def __init__( security: Any = None if callable(api_key): # pylint: disable=unnecessary-lambda-assignment - security = lambda: models.Security(api_key=api_key()) + security = lambda: models_.Security(api_key=api_key()) else: - security = models.Security(api_key=api_key) + security = models_.Security(api_key=api_key) if server_url is not None: if url_params is not None: diff --git a/src/mistralai/client/transcriptions.py b/src/mistralai/client/transcriptions.py index f7ef5b0a..7f01917d 100644 --- a/src/mistralai/client/transcriptions.py +++ b/src/mistralai/client/transcriptions.py @@ -2,12 +2,8 @@ # @generated-id: 75b45780c978 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - file as models_file, - timestampgranularity as models_timestampgranularity, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -21,16 +17,14 @@ def complete( self, *, model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file: Optional[Union[models.File, models.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, diarize: Optional[bool] = False, context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -121,27 +115,25 @@ def complete( return unmarshal_json_response(models.TranscriptionResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, *, model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file: Optional[Union[models.File, models.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, diarize: Optional[bool] = False, context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: 
OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -232,27 +224,25 @@ async def complete_async( return unmarshal_json_response(models.TranscriptionResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, *, model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file: Optional[Union[models.File, models.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, diarize: Optional[bool] = False, context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -352,28 +342,26 @@ def stream( ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, *, model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file: Optional[Union[models.File, models.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, diarize: Optional[bool] = False, context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -473,10 +461,10 @@ async def stream_async( ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/utils/__init__.py b/src/mistralai/client/utils/__init__.py index 7ed3a420..4bde281a 100644 --- a/src/mistralai/client/utils/__init__.py +++ b/src/mistralai/client/utils/__init__.py @@ -1,15 +1,24 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # @generated-id: b69505f4b269 -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING, Callable, TypeVar +import asyncio + +from .dynamic_imports import lazy_getattr, lazy_dir + +_T = TypeVar("_T") + + +async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T: + """Run a synchronous function in a thread pool to avoid blocking the event loop.""" + return await asyncio.to_thread(func, *args) + if TYPE_CHECKING: from .annotations import get_discriminator from .datetimes import parse_datetime from .enums import OpenEnumMeta + from .unions import parse_open_union from .headers import get_headers, get_response_headers from .metadata import ( FieldMetadata, @@ -79,6 +88,7 @@ "match_response", "MultipartFormMetadata", "OpenEnumMeta", + "parse_open_union", "PathParamMetadata", "QueryParamMetadata", "remove_suffix", @@ -132,6 +142,7 @@ "match_response": ".values", "MultipartFormMetadata": ".metadata", "OpenEnumMeta": ".enums", + "parse_open_union": ".unions", "PathParamMetadata": ".metadata", "QueryParamMetadata": ".metadata", "remove_suffix": ".url", @@ -161,38 +172,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - return getattr(module, attr_name) - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/utils/dynamic_imports.py b/src/mistralai/client/utils/dynamic_imports.py new file mode 100644 index 00000000..969f2fc7 --- /dev/null +++ b/src/mistralai/client/utils/dynamic_imports.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ac9918d925c0 + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). + dynamic_imports: Dict mapping attribute names to relative module paths. + sub_packages: Optional list of subpackage names to lazy-load. + """ + module_name = dynamic_imports.get(attr_name) + if module_name is not None: + try: + module = dynamic_import(package, module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + if sub_packages and attr_name in sub_packages: + return import_module(f".{attr_name}", package) + + raise AttributeError(f"module '{package}' has no attribute '{attr_name}'") + + +def lazy_dir(*, dynamic_imports, sub_packages=None): + """Module-level __dir__ that lists lazily-loadable attributes.""" + lazy_attrs = builtins.list(dynamic_imports.keys()) + if sub_packages: + lazy_attrs.extend(sub_packages) + return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/client/utils/eventstreaming.py b/src/mistralai/client/utils/eventstreaming.py index 3fe3c7e1..19a12152 100644 --- a/src/mistralai/client/utils/eventstreaming.py +++ b/src/mistralai/client/utils/eventstreaming.py @@ -3,7 +3,9 @@ import re import json +from dataclasses import dataclass, asdict from typing import ( + Any, Callable, Generic, TypeVar, @@ -23,6 +25,7 @@ class EventStream(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] + _closed: bool def __init__( self, @@ -34,17 +37,21 @@ def __init__( self.response = response self.generator = stream_events(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __iter__(self): return self def __next__(self): + if self._closed: + raise StopIteration return next(self.generator) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): + self._closed = True self.response.close() @@ -54,6 +61,7 @@ class EventStreamAsync(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] + _closed: bool def __init__( self, @@ -65,33 +73,45 @@ def __init__( self.response = response self.generator = stream_events_async(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __aiter__(self): return self async def __anext__(self): + if self._closed: + raise StopAsyncIteration return await self.generator.__anext__() async def __aenter__(self): return self async def __aexit__(self, exc_type, exc_val, exc_tb): + self._closed = True await self.response.aclose() +@dataclass class ServerEvent: id: Optional[str] = None event: Optional[str] = None - data: Optional[str] = None + data: Any = None retry: 
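The helpers above centralise the lazy-import pattern, so a package __init__ only keeps the attribute-to-module mapping; a sketch with a hypothetical excerpt of that mapping:

from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir

_dynamic_imports = {
    "parse_datetime": ".datetimes",
    "get_headers": ".headers",
}

def __getattr__(attr_name):
    # Resolve the attribute from its submodule on first access.
    return lazy_getattr(
        attr_name, package=__package__, dynamic_imports=_dynamic_imports
    )

def __dir__():
    return lazy_dir(dynamic_imports=_dynamic_imports)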
Optional[int] = None MESSAGE_BOUNDARIES = [ b"\r\n\r\n", - b"\n\n", + b"\r\n\r", + b"\r\n\n", + b"\r\r\n", + b"\n\r\n", b"\r\r", + b"\n\r", + b"\n\n", ] +UTF8_BOM = b"\xef\xbb\xbf" + async def stream_events_async( response: httpx.Response, @@ -100,14 +120,10 @@ async def stream_events_async( ) -> AsyncGenerator[T, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. - if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -122,15 +138,22 @@ async def stream_events_async( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + await response.aclose() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event @@ -142,14 +165,10 @@ def stream_events( ) -> Generator[T, None, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -164,22 +183,33 @@ def stream_events( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + response.close() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: block = raw.decode() lines = re.split(r"\r?\n|\r", block) publish = False @@ -190,13 +220,16 @@ def _parse_event( continue delim = line.find(":") - if delim <= 0: + if delim == 0: continue - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] if field == "event": event.event = value @@ -205,37 +238,36 @@ def _parse_event( data += value + "\n" publish = True elif field == "id": - event.id = value publish = True + if "\x00" not in value: + event_id = value elif field == "retry": - event.retry = int(value) if value.isdigit() else None + if value.isdigit(): + event.retry = int(value) publish = True + event.id = event_id + if sentinel and data == f"{sentinel}\n": - return None, True + return None, True, event_id if data: data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data out = None if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): diff --git a/src/mistralai/client/utils/forms.py b/src/mistralai/client/utils/forms.py index 2b474b9a..6facec53 100644 --- a/src/mistralai/client/utils/forms.py +++ b/src/mistralai/client/utils/forms.py @@ -143,7 +143,7 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files - array_field_name = f_name + "[]" + array_field_name = f_name for file_obj in val: if not _is_set(file_obj): continue @@ -186,7 +186,7 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - array_field_name = f_name + "[]" + array_field_name = f_name form[array_field_name] = values else: form[f_name] = _val_to_string(val) diff --git 
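A self-contained sketch of the reworked SSE parsing, assuming the helper keeps the module path shown above: the last seen id is carried over to later events, non-JSON data stays a string, and the sentinel ends iteration.

import json

import httpx

from mistralai.client.utils.eventstreaming import stream_events

body = b'id: 1\ndata: {"x": 1}\n\ndata: {"x": 2}\n\ndata: [DONE]\n\n'
response = httpx.Response(200, content=body)
for event in stream_events(response, json.loads, "[DONE]"):
    print(event)
# {'id': '1', 'data': {'x': 1}}
# {'id': '1', 'data': {'x': 2}}  -- the id persists from the previous event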
a/src/mistralai/client/utils/retries.py b/src/mistralai/client/utils/retries.py index 90c008b0..bea13041 100644 --- a/src/mistralai/client/utils/retries.py +++ b/src/mistralai/client/utils/retries.py @@ -145,12 +145,7 @@ def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise @@ -194,12 +189,7 @@ async def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise diff --git a/src/mistralai/client/utils/security.py b/src/mistralai/client/utils/security.py index 4c73806d..d8b9d8fe 100644 --- a/src/mistralai/client/utils/security.py +++ b/src/mistralai/client/utils/security.py @@ -154,6 +154,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value elif sub_type == "custom": return else: diff --git a/src/mistralai/client/utils/unions.py b/src/mistralai/client/utils/unions.py new file mode 100644 index 00000000..14ef1bd5 --- /dev/null +++ b/src/mistralai/client/utils/unions.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d23713342634 + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. 
+ """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/src/mistralai/client/utils/unmarshal_json_response.py b/src/mistralai/client/utils/unmarshal_json_response.py index 65190e5c..624433c4 100644 --- a/src/mistralai/client/utils/unmarshal_json_response.py +++ b/src/mistralai/client/utils/unmarshal_json_response.py @@ -6,7 +6,7 @@ import httpx from .serializers import unmarshal_json -from mistralai.client import models +from mistralai.client import errors T = TypeVar("T") @@ -31,7 +31,7 @@ def unmarshal_json_response( try: return unmarshal_json(body, typ) except Exception as e: - raise models.ResponseValidationError( + raise errors.ResponseValidationError( "Response validation failed", http_res, e, diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py index 01baa6a9..7ade705f 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -22,7 +22,6 @@ create_tool_call, ) from mistralai.client.models import ( - AgentTool, CompletionArgs, CompletionArgsTypedDict, ConversationInputs, @@ -35,6 +34,8 @@ InputEntries, MessageInputEntry, ResponseFormat, + UnknownAgentTool, + UpdateAgentRequestTool, ) from mistralai.client.types.basemodel import BaseModel, OptionalNullable, UNSET @@ -187,8 +188,11 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs ) agent = await beta_client.agents.get_async(agent_id=self.agent_id) agent_tools = agent.tools or [] - updated_tools: list[AgentTool] = [] + updated_tools: list[UpdateAgentRequestTool] = [] for tool in agent_tools: + if isinstance(tool, UnknownAgentTool): + # Skip unknown tools - can't include them in update request + continue if not isinstance(tool, FunctionTool): updated_tools.append(tool) elif tool.function.name in self._callable_tools: From 19f29d139211213280bae77e58aaf1848e967d88 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 25 Feb 2026 19:00:49 +0100 Subject: [PATCH 29/42] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20-?= =?UTF-8?q?=20Generate=20MISTRALAI=20MISTRALAI-SDK=202.0.0b1=20(#372)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: set version to 2.0.0b1 --------- Co-authored-by: speakeasybot Co-authored-by: Louis Sanna --- .speakeasy/gen.lock | 15 ++++++++------- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 3 ++- RELEASES.md | 12 +++++++++++- pyproject.toml | 2 +- src/mistralai/client/_version.py | 4 ++-- uv.lock | 2 +- 7 files changed, 26 insertions(+), 14 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 678c20f2..7314b7b1 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,15 +5,15 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0-a3.1 - configChecksum: 134292298710eaf25a0f90f7097e648f + releaseVersion: 2.0.0b1 + configChecksum: 871b5a7d3687bd2a9ebd0e205e4b36a3 repoURL: 
https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 21ec746f-e476-468a-bb8e-c942c0997501 - pristine_commit_hash: 99ae95385eb06175841ba19bef78319a5921c585 - pristine_tree_hash: 5b06b6f5add0cd16af8139d524a42368532441c6 + generation_id: 1527268d-25cf-4a8c-8a67-09694eaf0d79 + pristine_commit_hash: 5642b69da5a9a00af1e84ca689b7587f2269d0c4 + pristine_tree_hash: f9eae7c82e85b3114e342a4b6500b9704a266493 features: python: additionalDependencies: 1.0.0 @@ -56,6 +56,7 @@ trackedFiles: id: 89aa447020cd last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + deleted: true USAGE.md: id: 3aed33ce6e6f last_write_checksum: sha1:50cc0351d6145a805d1d5ae8be4dfce58178e648 @@ -1606,8 +1607,8 @@ trackedFiles: pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:03563b818feb27386f7d6a0321a3875e3024a2d2 - pristine_git_object: 1a4d15d66f45d13c7f9cae550138390b5cf5897e + last_write_checksum: sha1:f5109c91723cc927e8513ac9e637512edd91f04e + pristine_git_object: ab2cf01d06f4d4373b52373795db76aa40f00ceb src/mistralai/client/accesses.py: id: 76fc53bfcf59 last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 733650dc..e237388a 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -31,7 +31,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0-a3.1 + version: 2.0.0b1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index d051080f..e3ca5c59 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -18,6 +18,7 @@ sources: sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 tags: - latest + - speakeasy-sdk-regen-1772040743 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -39,7 +40,7 @@ targets: sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:f3cf9d6d99a27d6e753bd6e1a2f2c2fb290f412a455576de4bab610ab4825939 + codeSamplesRevisionDigest: sha256:0bcecf3d1523375a194d6aa13116ffba291da8321e44b01399ae5e24f7ce2e33 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.729.0 diff --git a/RELEASES.md b/RELEASES.md index 90f534ef..48b65760 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -368,4 +368,14 @@ Based on: ### Generated - [python v1.12.0] . ### Releases -- [PyPI v1.12.0] https://pypi.org/project/mistralai/1.12.0 - . \ No newline at end of file +- [PyPI v1.12.0] https://pypi.org/project/mistralai/1.12.0 - . + +## 2026-02-25 17:32:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] . +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai/2.0.0b1 - . 
\ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index c1762f0a..c42e4260 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0a4" +version = "2.0.0b1" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 1a4d15d6..ab2cf01d 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -4,10 +4,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0-a3.1" +__version__: str = "2.0.0b1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0-a3.1 2.841.0 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai" try: if __package__ is not None: diff --git a/uv.lock b/uv.lock index 8c689c4a..1a37a7d6 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "2.0.0a4" +version = "2.0.0b1" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, From 4b7ac55d694f8cdaaae3226611fdd46b1a81b8d8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 25 Feb 2026 19:26:02 +0100 Subject: [PATCH 30/42] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20-?= =?UTF-8?q?=20Generate=20MISTRAL-PYTHON-SDK-GOOGLE-CLOUD=20MISTRALAI-GCP-S?= =?UTF-8?q?DK=202.0.0b1=20(#373)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: set version to 2.0.0b1 --------- Co-authored-by: speakeasybot Co-authored-by: Louis Sanna --- .speakeasy/workflow.lock | 3 ++- packages/gcp/.speakeasy/gen.lock | 14 +++++++------- packages/gcp/.speakeasy/gen.yaml | 2 +- packages/gcp/RELEASES.md | 12 +++++++++++- packages/gcp/pyproject.toml | 2 +- packages/gcp/src/mistralai/gcp/client/_version.py | 4 ++-- packages/gcp/uv.lock | 2 +- 7 files changed, 25 insertions(+), 14 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index e3ca5c59..bbad9734 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -12,6 +12,7 @@ sources: sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 tags: - latest + - speakeasy-sdk-regen-1772041030 mistral-openapi: sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 @@ -33,7 +34,7 @@ targets: sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:f6c4dc988e9b7be6f8d8087d14b2269be601bb9bff2227b07e1018efe88e1556 + codeSamplesRevisionDigest: sha256:35f30ba8ce4bd70f58b6abc5222d0bbf82eecc3109b09ca99df4406e363e21a0 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi diff --git a/packages/gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock index 517e1a85..6e33773d 100644 --- a/packages/gcp/.speakeasy/gen.lock +++ b/packages/gcp/.speakeasy/gen.lock @@ -5,16 +5,16 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0-a4.1 - configChecksum: 
bfe17061a2e5ac54039980ad7a48fd77 + releaseVersion: 2.0.0b1 + configChecksum: 9cea6a311ff15502c47b0ef87e9846a2 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/gcp installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp published: true persistentEdits: - generation_id: c7e2e696-b223-4993-a79b-2e6f15242c30 - pristine_commit_hash: 86953bc23bb7fcfc3c2525f79114411bc27e8f75 - pristine_tree_hash: 93675a8857b7519918499101d4a5e30fc7fe2c4a + generation_id: e503bb37-7bdd-4ebf-9bed-a8f754c99f8a + pristine_commit_hash: f14b1b1288437b7fc0ba666a384614a225385259 + pristine_tree_hash: 67e6d0a84ae20666a636dcc8ad174647a96b105f features: python: additionalDependencies: 1.0.0 @@ -326,8 +326,8 @@ trackedFiles: pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 src/mistralai/gcp/client/_version.py: id: f87319e32c7b - last_write_checksum: sha1:85dd6da1d6503d717e8c9bd6d62278b469d3b464 - pristine_git_object: 204c92a656855ad281e86a74467e71ae1b04639f + last_write_checksum: sha1:0d99fadc73b957112022a95eabeb0e3a98d14ff4 + pristine_git_object: 36e44a5e6067e8bd197b38cc238686f660c77244 src/mistralai/gcp/client/basesdk.py: id: 4d594572857b last_write_checksum: sha1:d8ef9e2f4fa97d402eb9f5472ceb80fb39693991 diff --git a/packages/gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml index 54336636..18f4b4d5 100644 --- a/packages/gcp/.speakeasy/gen.yaml +++ b/packages/gcp/.speakeasy/gen.yaml @@ -30,7 +30,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0-a4.1 + version: 2.0.0b1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/gcp/RELEASES.md b/packages/gcp/RELEASES.md index b503c75f..ec883c62 100644 --- a/packages/gcp/RELEASES.md +++ b/packages/gcp/RELEASES.md @@ -8,4 +8,14 @@ Based on: ### Generated - [python v1.7.0] packages/mistralai_gcp ### Releases -- [PyPI v1.7.0] https://pypi.org/project/mistralai-gcp/1.7.0 - packages/mistralai_gcp \ No newline at end of file +- [PyPI v1.7.0] https://pypi.org/project/mistralai-gcp/1.7.0 - packages/mistralai_gcp + +## 2026-02-25 17:36:50 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] packages/gcp +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-gcp/2.0.0b1 - packages/gcp \ No newline at end of file diff --git a/packages/gcp/pyproject.toml b/packages/gcp/pyproject.toml index 98619ecd..c0497656 100644 --- a/packages/gcp/pyproject.toml +++ b/packages/gcp/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-gcp" -version = "2.0.0a4" +version = "2.0.0b1" description = "Python Client SDK for the Mistral AI API in GCP." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/packages/gcp/src/mistralai/gcp/client/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py index 204c92a6..36e44a5e 100644 --- a/packages/gcp/src/mistralai/gcp/client/_version.py +++ b/packages/gcp/src/mistralai/gcp/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "2.0.0-a4.1" +__version__: str = "2.0.0b1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0-a4.1 2.841.0 1.0.0 mistralai-gcp" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/gcp/uv.lock b/packages/gcp/uv.lock index a49757c9..9bd9f9b6 100644 --- a/packages/gcp/uv.lock +++ b/packages/gcp/uv.lock @@ -277,7 +277,7 @@ wheels = [ [[package]] name = "mistralai-gcp" -version = "2.0.0a4" +version = "2.0.0b1" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, From 3eaedb47bbdf810356fa80e3e920d572e0f0b70b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 25 Feb 2026 19:26:34 +0100 Subject: [PATCH 31/42] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20-?= =?UTF-8?q?=20Generate=20MISTRAL-PYTHON-SDK-AZURE=20MISTRALAI-AZURE-SDK=20?= =?UTF-8?q?2.0.0b1=20(#374)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: set version to 2.0.0b1 --------- Co-authored-by: speakeasybot Co-authored-by: Louis Sanna --- .speakeasy/workflow.lock | 3 ++- packages/azure/.speakeasy/gen.lock | 14 +++++++------- packages/azure/.speakeasy/gen.yaml | 2 +- packages/azure/RELEASES.md | 12 +++++++++++- packages/azure/pyproject.toml | 2 +- .../azure/src/mistralai/azure/client/_version.py | 4 ++-- packages/azure/uv.lock | 2 +- 7 files changed, 25 insertions(+), 14 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index bbad9734..b26cdf2b 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -6,6 +6,7 @@ sources: sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e tags: - latest + - speakeasy-sdk-regen-1772041212 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 @@ -27,7 +28,7 @@ targets: sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:248e5daaa44589805664ab1479502885758fde0f1da3b384b97b1a09d74c8256 + codeSamplesRevisionDigest: sha256:68866aada6ad13253e32dab06e4876a7aeba4d7759683d81b2ba27f0fb55a342 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud diff --git a/packages/azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock index c795c61c..5da824d1 100644 --- a/packages/azure/.speakeasy/gen.lock +++ b/packages/azure/.speakeasy/gen.lock @@ -5,16 +5,16 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0-a4.1 - configChecksum: e2523ba89eba35872d05ddb673dd862a + releaseVersion: 2.0.0b1 + configChecksum: 
01160bf17a4abd1ce038528d20cd4685 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/azure installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure published: true persistentEdits: - generation_id: 1812b54a-0aa7-4b43-8c53-d70427856543 - pristine_commit_hash: 28db2945de995b5707dc7f310b5291435aaafcbf - pristine_tree_hash: b01973b36166a61d38fa84cf7dae49b7a74e1402 + generation_id: 2f5b7e40-9bd2-4c96-9e97-16a92e4b44af + pristine_commit_hash: 480a8b0e23da7e4752e6ad5b36fc72651e09d2d7 + pristine_tree_hash: 8a4c9b9a253fbe496a52e0496fa7e58e91e32c7c features: python: additionalDependencies: 1.0.0 @@ -354,8 +354,8 @@ trackedFiles: pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 src/mistralai/azure/client/_version.py: id: a77160e60e5d - last_write_checksum: sha1:b1d1971d43e8f92bd55bb45653a228fd9de97af3 - pristine_git_object: 4f985cc69c492521664044337e5910f8e5a26b90 + last_write_checksum: sha1:1b76e9448049c69dbdb690b9de25456378bba0a7 + pristine_git_object: 213648be87a19e24d87160c1286614b2d5df7344 src/mistralai/azure/client/basesdk.py: id: 5a585a95ec21 last_write_checksum: sha1:0c2e686aa42d6aeeb103193aa058d6ddff7bcf74 diff --git a/packages/azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml index 0b7262e0..55934cc8 100644 --- a/packages/azure/.speakeasy/gen.yaml +++ b/packages/azure/.speakeasy/gen.yaml @@ -30,7 +30,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0-a4.1 + version: 2.0.0b1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/azure/RELEASES.md b/packages/azure/RELEASES.md index e471af0f..e625ee98 100644 --- a/packages/azure/RELEASES.md +++ b/packages/azure/RELEASES.md @@ -8,4 +8,14 @@ Based on: ### Generated - [python v1.7.0] packages/mistralai_azure ### Releases -- [PyPI v1.7.0] https://pypi.org/project/mistralai_azure/1.7.0 - packages/mistralai_azure \ No newline at end of file +- [PyPI v1.7.0] https://pypi.org/project/mistralai_azure/1.7.0 - packages/mistralai_azure + +## 2026-02-25 17:39:51 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] packages/azure +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-azure/2.0.0b1 - packages/azure \ No newline at end of file diff --git a/packages/azure/pyproject.toml b/packages/azure/pyproject.toml index 3b9aa829..cf80bde8 100644 --- a/packages/azure/pyproject.toml +++ b/packages/azure/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-azure" -version = "2.0.0a4" +version = "2.0.0b1" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/packages/azure/src/mistralai/azure/client/_version.py b/packages/azure/src/mistralai/azure/client/_version.py index 4f985cc6..213648be 100644 --- a/packages/azure/src/mistralai/azure/client/_version.py +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-azure" -__version__: str = "2.0.0-a4.1" +__version__: str = "2.0.0b1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0-a4.1 2.841.0 1.0.0 mistralai-azure" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-azure" try: if __package__ is not None: diff --git a/packages/azure/uv.lock b/packages/azure/uv.lock index cedb1ce8..7c090c00 100644 --- a/packages/azure/uv.lock +++ b/packages/azure/uv.lock @@ -154,7 +154,7 @@ wheels = [ [[package]] name = "mistralai-azure" -version = "2.0.0a4" +version = "2.0.0b1" source = { editable = "." } dependencies = [ { name = "httpcore" }, From 7cb12a9cce93040acc6cc19708e09a3c90a60c92 Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Thu, 26 Feb 2026 11:44:40 +0100 Subject: [PATCH 32/42] feat(realtime): add support for target streaming delay in realtime transcription (#376) * feat(realtime): add support for target streaming delay in realtime transcription This commit introduces the ability to specify a target streaming delay in milliseconds for realtime transcription sessions. The changes include: 1. Adding a new parameter `target_streaming_delay_ms` to the realtime transcription API 2. Updating the connection management to handle the new delay parameter 3. Adding support for audio flushing to ensure proper handling of streaming delays 4. Creating a new example demonstrating dual-delay transcription with two parallel streams 5. Improving error handling for microphone access and PyAudio loading The changes allow for more precise control over the latency/accuracy tradeoff in realtime transcription, enabling use cases that require different streaming delays for different purposes. 
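A minimal usage sketch (editorial, not part of the generated diff; the call shape, event types, and default values are taken from the examples and SDK changes in this patch, while the API key and the dummy audio source are placeholders):

    import asyncio
    from typing import AsyncIterator

    from mistralai.client import Mistral
    from mistralai.client.models import (
        AudioFormat,
        TranscriptionStreamDone,
        TranscriptionStreamTextDelta,
    )

    async def pcm_chunks() -> AsyncIterator[bytes]:
        # Placeholder audio source: yield raw pcm_s16le chunks
        # (320 bytes ~= 10 ms of 16 kHz 16-bit mono audio).
        yield b"\x00" * 320

    async def main() -> None:
        client = Mistral(api_key="YOUR_API_KEY")
        async for event in client.audio.realtime.transcribe_stream(
            audio_stream=pcm_chunks(),
            model="voxtral-mini-transcribe-realtime-2602",
            audio_format=AudioFormat(encoding="pcm_s16le", sample_rate=16000),
            # Lower values favor latency, higher values favor accuracy.
            target_streaming_delay_ms=240,
        ):
            if isinstance(event, TranscriptionStreamTextDelta):
                print(event.text, end="", flush=True)
            elif isinstance(event, TranscriptionStreamDone):
                break

    asyncio.run(main())

Omitting target_streaming_delay_ms keeps the previous behavior: the parameter defaults to None and the session is only updated when a value is provided.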
* ci: skip dual-delay realtime example in run_examples --- ...ime_transcription_dual_delay_microphone.py | 473 ++++++++++++++++++ ...async_realtime_transcription_microphone.py | 32 +- .../async_realtime_transcription_stream.py | 7 + examples/mistral/audio/pyaudio_utils.py | 38 ++ scripts/run_examples.sh | 1 + src/mistralai/extra/realtime/connection.py | 61 ++- src/mistralai/extra/realtime/transcription.py | 11 +- 7 files changed, 600 insertions(+), 23 deletions(-) create mode 100644 examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py create mode 100644 examples/mistral/audio/pyaudio_utils.py diff --git a/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py b/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py new file mode 100644 index 00000000..7653b0ed --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py @@ -0,0 +1,473 @@ +#!/usr/bin/env python +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai[realtime]", +# "pyaudio", +# "rich", +# ] +# [tool.uv.sources] +# mistralai = { path = "../../..", editable = true } +# /// + +import argparse +import asyncio +import difflib +import os +import sys +from dataclasses import dataclass +from typing import AsyncIterator, Sequence + +from rich.align import Align +from rich.console import Console +from rich.layout import Layout +from rich.live import Live +from rich.panel import Panel +from rich.text import Text + +from mistralai.client import Mistral +from mistralai.extra.realtime import UnknownRealtimeEvent +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSessionCreated, + TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + +from pyaudio_utils import load_pyaudio + +console = Console() + + +@dataclass +class DualTranscriptState: + """Tracks transcript state for dual-delay transcription.""" + + fast_full_text: str = "" + slow_full_text: str = "" + fast_status: str = "🔌 Connecting..." + slow_status: str = "🔌 Connecting..." 
+ error: str | None = None + fast_done: bool = False + slow_done: bool = False + + def set_error(self, message: str) -> None: + self.error = message + self.fast_status = "❌ Error" + self.slow_status = "❌ Error" + + +class DualTranscriptDisplay: + """Renders a live dual-delay transcription UI.""" + + def __init__( + self, + *, + model: str, + fast_delay_ms: int, + slow_delay_ms: int, + state: DualTranscriptState, + ) -> None: + self.model = model + self.fast_delay_ms = fast_delay_ms + self.slow_delay_ms = slow_delay_ms + self.state = state + + @staticmethod + def _normalize_word(word: str) -> str: + return word.strip(".,!?;:\"'()[]{}").lower() + + def _compute_display_texts(self) -> tuple[str, str]: + slow_words = self.state.slow_full_text.split() + fast_words = self.state.fast_full_text.split() + + if not slow_words: + partial_text = f" {self.state.fast_full_text}".rstrip() + return "", partial_text + + slow_norm = [self._normalize_word(word) for word in slow_words] + fast_norm = [self._normalize_word(word) for word in fast_words] + + matcher = difflib.SequenceMatcher(None, slow_norm, fast_norm) + last_fast_index = 0 + slow_progress = 0 + for block in matcher.get_matching_blocks(): + if block.size == 0: + continue + slow_end = block.a + block.size + if slow_end > slow_progress: + slow_progress = slow_end + last_fast_index = block.b + block.size + + if last_fast_index < len(fast_words): + ahead_words = fast_words[last_fast_index:] + partial_text = " " + " ".join(ahead_words) if ahead_words else "" + else: + partial_text = "" + + return self.state.slow_full_text, partial_text + + @staticmethod + def _status_style(status: str) -> str: + if "Listening" in status: + return "green" + if "Connecting" in status: + return "yellow dim" + if "Done" in status or "Stopped" in status: + return "dim" + return "red" + + def render(self) -> Layout: + layout = Layout() + + header_text = Text() + header_text.append("│ ", style="dim") + header_text.append(self.model, style="dim") + header_text.append(" │ ", style="dim") + header_text.append( + f"fast {self.fast_delay_ms}ms", style="bright_yellow" + ) + header_text.append( + f" {self.state.fast_status}", + style=self._status_style(self.state.fast_status), + ) + header_text.append(" │ ", style="dim") + header_text.append(f"slow {self.slow_delay_ms}ms", style="white") + header_text.append( + f" {self.state.slow_status}", + style=self._status_style(self.state.slow_status), + ) + + header = Align.left(header_text, vertical="middle", pad=False) + + final_text, partial_text = self._compute_display_texts() + transcript_text = Text() + if final_text or partial_text: + transcript_text.append(final_text, style="white") + transcript_text.append(partial_text, style="bright_yellow") + else: + transcript_text.append("...", style="dim") + + transcript = Panel( + Align.left(transcript_text, vertical="top"), + border_style="dim", + padding=(1, 2), + ) + + footer_text = Text() + footer_text.append("ctrl+c", style="dim") + footer_text.append(" quit", style="dim italic") + footer = Align.left(footer_text, vertical="middle", pad=False) + + if self.state.error: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout( + Panel(Text(self.state.error, style="red"), border_style="red"), + name="error", + size=4, + ), + Layout(footer, name="footer", size=1), + ) + else: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout(footer, name="footer", size=1), + ) + + return layout + + 
+async def iter_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, +) -> AsyncIterator[bytes]: + """ + Yield microphone PCM chunks using PyAudio (16-bit mono). + Encoding is always pcm_s16le. + """ + pyaudio = load_pyaudio() + + p = pyaudio.PyAudio() + chunk_samples = int(sample_rate * chunk_duration_ms / 1000) + + stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=sample_rate, + input=True, + frames_per_buffer=chunk_samples, + ) + + loop = asyncio.get_running_loop() + try: + while True: + data = await loop.run_in_executor(None, stream.read, chunk_samples, False) + yield data + finally: + stream.stop_stream() + stream.close() + p.terminate() + + +async def queue_audio_iter( + queue: asyncio.Queue[bytes | None], +) -> AsyncIterator[bytes]: + """Yield audio chunks from a queue until a None sentinel is received.""" + while True: + chunk = await queue.get() + if chunk is None: + break + yield chunk + + +async def broadcast_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, + queues: Sequence[asyncio.Queue[bytes | None]], +) -> None: + """Read from the microphone once and broadcast to multiple queues.""" + try: + async for chunk in iter_microphone( + sample_rate=sample_rate, chunk_duration_ms=chunk_duration_ms + ): + for queue in queues: + await queue.put(chunk) + finally: + for queue in queues: + while True: + try: + queue.put_nowait(None) + break + except asyncio.QueueFull: + try: + queue.get_nowait() + except asyncio.QueueEmpty: + break + + +def _status_for_event(event: object) -> str: + if isinstance(event, RealtimeTranscriptionSessionCreated): + return "🎤 Listening..." + return "✅ Done" + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Dual-delay real-time microphone transcription." 
+ ) + parser.add_argument( + "--model", + default="voxtral-mini-transcribe-realtime-2602", + help="Model ID", + ) + parser.add_argument( + "--fast-delay-ms", + type=int, + default=240, + help="Fast target streaming delay in ms", + ) + parser.add_argument( + "--slow-delay-ms", + type=int, + default=2400, + help="Slow target streaming delay in ms", + ) + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + choices=[8000, 16000, 22050, 44100, 48000], + help="Sample rate in Hz", + ) + parser.add_argument( + "--chunk-duration", + type=int, + default=10, + help="Chunk duration in ms", + ) + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key", + ) + parser.add_argument( + "--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "wss://api.mistral.ai"), + ) + return parser.parse_args() + + +async def run_stream( + *, + client: Mistral, + model: str, + delay_ms: int, + audio_stream: AsyncIterator[bytes], + audio_format: AudioFormat, + state: DualTranscriptState, + update_queue: asyncio.Queue[None], + is_fast: bool, +) -> None: + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=audio_stream, + model=model, + audio_format=audio_format, + target_streaming_delay_ms=delay_ms, + ): + if isinstance(event, RealtimeTranscriptionSessionCreated): + if is_fast: + state.fast_status = _status_for_event(event) + else: + state.slow_status = _status_for_event(event) + elif isinstance(event, TranscriptionStreamTextDelta): + if is_fast: + state.fast_full_text += event.text + else: + state.slow_full_text += event.text + elif isinstance(event, TranscriptionStreamDone): + if is_fast: + state.fast_status = _status_for_event(event) + state.fast_done = True + else: + state.slow_status = _status_for_event(event) + state.slow_done = True + break + elif isinstance(event, RealtimeTranscriptionError): + state.set_error(str(event.error)) + break + elif isinstance(event, UnknownRealtimeEvent): + continue + + if update_queue.empty(): + update_queue.put_nowait(None) + except Exception as exc: # pragma: no cover - safety net for UI demo + state.set_error(str(exc)) + if update_queue.empty(): + update_queue.put_nowait(None) + + +async def ui_loop( + display: DualTranscriptDisplay, + update_queue: asyncio.Queue[None], + stop_event: asyncio.Event, + *, + refresh_hz: float = 12.0, +) -> None: + with Live( + display.render(), console=console, refresh_per_second=refresh_hz, screen=True + ) as live: + while not stop_event.is_set(): + try: + await asyncio.wait_for(update_queue.get(), timeout=0.25) + except asyncio.TimeoutError: + pass + live.update(display.render()) + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + try: + load_pyaudio() + except RuntimeError as exc: + console.print(str(exc), style="red") + return 1 + + state = DualTranscriptState() + display = DualTranscriptDisplay( + model=args.model, + fast_delay_ms=args.fast_delay_ms, + slow_delay_ms=args.slow_delay_ms, + state=state, + ) + + client = Mistral(api_key=api_key, server_url=args.base_url) + audio_format = AudioFormat(encoding="pcm_s16le", sample_rate=args.sample_rate) + + fast_queue: asyncio.Queue[bytes | None] = asyncio.Queue(maxsize=50) + slow_queue: asyncio.Queue[bytes | None] = asyncio.Queue(maxsize=50) + + stop_event = asyncio.Event() + update_queue: asyncio.Queue[None] = asyncio.Queue(maxsize=1) + + broadcaster = asyncio.create_task( + broadcast_microphone( + sample_rate=args.sample_rate, + 
chunk_duration_ms=args.chunk_duration, + queues=(fast_queue, slow_queue), + ) + ) + + fast_task = asyncio.create_task( + run_stream( + client=client, + model=args.model, + delay_ms=args.fast_delay_ms, + audio_stream=queue_audio_iter(fast_queue), + audio_format=audio_format, + state=state, + update_queue=update_queue, + is_fast=True, + ) + ) + + slow_task = asyncio.create_task( + run_stream( + client=client, + model=args.model, + delay_ms=args.slow_delay_ms, + audio_stream=queue_audio_iter(slow_queue), + audio_format=audio_format, + state=state, + update_queue=update_queue, + is_fast=False, + ) + ) + + ui_task = asyncio.create_task( + ui_loop(display, update_queue, stop_event, refresh_hz=12.0) + ) + + try: + while True: + await asyncio.sleep(0.1) + for task in (broadcaster, fast_task, slow_task): + if not task.done(): + continue + exc = task.exception() + if exc: + state.set_error(str(exc)) + if update_queue.empty(): + update_queue.put_nowait(None) + stop_event.set() + break + if state.error: + stop_event.set() + break + if state.fast_done and state.slow_done: + stop_event.set() + break + except KeyboardInterrupt: + state.fast_status = "⏹️ Stopped" + state.slow_status = "⏹️ Stopped" + stop_event.set() + finally: + broadcaster.cancel() + fast_task.cancel() + slow_task.cancel() + await asyncio.gather(broadcaster, fast_task, slow_task, return_exceptions=True) + await ui_task + + return 0 if not state.error else 1 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/async_realtime_transcription_microphone.py b/examples/mistral/audio/async_realtime_transcription_microphone.py index 191a21e4..49568aea 100644 --- a/examples/mistral/audio/async_realtime_transcription_microphone.py +++ b/examples/mistral/audio/async_realtime_transcription_microphone.py @@ -33,14 +33,17 @@ TranscriptionStreamTextDelta, ) +from pyaudio_utils import load_pyaudio + console = Console() class TranscriptDisplay: """Manages the live transcript display.""" - def __init__(self, model: str) -> None: + def __init__(self, model: str, target_streaming_delay_ms: int | None) -> None: self.model = model + self.target_streaming_delay_ms = target_streaming_delay_ms self.transcript = "" self.status = "🔌 Connecting..." self.error: str | None = None @@ -65,6 +68,10 @@ def render(self) -> Layout: header_text = Text() header_text.append("│ ", style="dim") header_text.append(self.model, style="dim") + if self.target_streaming_delay_ms is not None: + header_text.append( + f" · delay {self.target_streaming_delay_ms}ms", style="dim" + ) header_text.append(" │ ", style="dim") if "Listening" in self.status: @@ -126,7 +133,7 @@ async def iter_microphone( Yield microphone PCM chunks using PyAudio (16-bit mono). Encoding is always pcm_s16le. 
""" - import pyaudio + pyaudio = load_pyaudio() p = pyaudio.PyAudio() chunk_samples = int(sample_rate * chunk_duration_ms / 1000) @@ -164,6 +171,12 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--chunk-duration", type=int, default=10, help="Chunk duration in ms" ) + parser.add_argument( + "--target-streaming-delay-ms", + type=int, + default=None, + help="Target streaming delay in milliseconds", + ) parser.add_argument( "--api-key", default=os.environ.get("MISTRAL_API_KEY"), help="Mistral API key" ) @@ -178,6 +191,12 @@ async def main() -> int: args = parse_args() api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + try: + load_pyaudio() + except RuntimeError as exc: + console.print(str(exc), style="red") + return 1 + client = Mistral(api_key=api_key, server_url=args.base_url) # microphone is always pcm_s16le here @@ -187,7 +206,9 @@ async def main() -> int: sample_rate=args.sample_rate, chunk_duration_ms=args.chunk_duration ) - display = TranscriptDisplay(model=args.model) + display = TranscriptDisplay( + model=args.model, target_streaming_delay_ms=args.target_streaming_delay_ms + ) with Live( display.render(), console=console, refresh_per_second=10, screen=True @@ -197,6 +218,7 @@ async def main() -> int: audio_stream=mic_stream, model=args.model, audio_format=audio_format, + target_streaming_delay_ms=args.target_streaming_delay_ms, ): if isinstance(event, RealtimeTranscriptionSessionCreated): display.set_listening() @@ -217,6 +239,10 @@ async def main() -> int: except KeyboardInterrupt: display.status = "⏹️ Stopped" live.update(display.render()) + except Exception as exc: + display.set_error(str(exc)) + live.update(display.render()) + return 1 return 0 diff --git a/examples/mistral/audio/async_realtime_transcription_stream.py b/examples/mistral/audio/async_realtime_transcription_stream.py index 0a0ac609..c005cf3f 100644 --- a/examples/mistral/audio/async_realtime_transcription_stream.py +++ b/examples/mistral/audio/async_realtime_transcription_stream.py @@ -90,6 +90,12 @@ def parse_args() -> argparse.Namespace: default=0.01, help="Delay between chunks in seconds", ) + parser.add_argument( + "--target-streaming-delay-ms", + type=int, + default=None, + help="Target streaming delay in milliseconds", + ) parser.add_argument( "--no-convert", action="store_true", @@ -120,6 +126,7 @@ async def main() -> int: ), model=args.model, audio_format=AudioFormat(encoding="pcm_s16le", sample_rate=16000), + target_streaming_delay_ms=args.target_streaming_delay_ms, ): if isinstance(event, TranscriptionStreamTextDelta): print(event.text, end="", flush=True) diff --git a/examples/mistral/audio/pyaudio_utils.py b/examples/mistral/audio/pyaudio_utils.py new file mode 100644 index 00000000..af72a885 --- /dev/null +++ b/examples/mistral/audio/pyaudio_utils.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from types import ModuleType + + +def load_pyaudio() -> ModuleType: + """ + Import PyAudio with a friendly error when PortAudio is missing. + + Raises: + RuntimeError: If PyAudio/PortAudio cannot be imported. + """ + try: + import pyaudio + except Exception as exc: + details = str(exc).lower() + if isinstance(exc, ModuleNotFoundError) and exc.name == "pyaudio": + message = ( + "PyAudio is required to use the microphone.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." 
+ ) + elif "pyaudio._portaudio" in details or "portaudio" in details: + message = ( + "PyAudio is installed, but the PortAudio native library is missing or " + "failed to load.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + else: + message = ( + "PyAudio is required to use the microphone, but it could not be " + "imported.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + raise RuntimeError(message) from exc + return pyaudio diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 998b8dbe..eca854b4 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -48,6 +48,7 @@ exclude_files=( "examples/mistral/agents/async_conversation_run_mcp_remote.py" "examples/mistral/audio/async_realtime_transcription_microphone.py" "examples/mistral/audio/async_realtime_transcription_stream.py" + "examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py" ) # Files that require extra dependencies (agents, mcp, audio, etc.) diff --git a/src/mistralai/extra/realtime/connection.py b/src/mistralai/extra/realtime/connection.py index ffbbc735..6547052b 100644 --- a/src/mistralai/extra/realtime/connection.py +++ b/src/mistralai/extra/realtime/connection.py @@ -18,15 +18,21 @@ from mistralai.client.models import ( AudioFormat, + RealtimeTranscriptionInputAudioAppend, + RealtimeTranscriptionInputAudioEnd, + RealtimeTranscriptionInputAudioFlush, RealtimeTranscriptionError, RealtimeTranscriptionSession, RealtimeTranscriptionSessionCreated, RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdateMessage, + RealtimeTranscriptionSessionUpdatePayload, TranscriptionStreamDone, TranscriptionStreamLanguage, TranscriptionStreamSegmentDelta, TranscriptionStreamTextDelta, ) +from mistralai.client.types import UNSET class UnknownRealtimeEvent(BaseModel): @@ -36,6 +42,7 @@ class UnknownRealtimeEvent(BaseModel): - invalid JSON payload - schema validation failure """ + type: Optional[str] content: Any error: Optional[str] = None @@ -56,7 +63,6 @@ class UnknownRealtimeEvent(BaseModel): UnknownRealtimeEvent, ] - _MESSAGE_MODELS: dict[str, Any] = { "session.created": RealtimeTranscriptionSessionCreated, "session.updated": RealtimeTranscriptionSessionUpdated, @@ -108,7 +114,6 @@ def __init__( ) -> None: self._websocket = websocket self._session = session - self._audio_format = session.audio_format self._closed = False self._initial_events: Deque[RealtimeEvent] = deque(initial_events or []) @@ -122,7 +127,7 @@ def session(self) -> RealtimeTranscriptionSession: @property def audio_format(self) -> AudioFormat: - return self._audio_format + return self._session.audio_format @property def is_closed(self) -> bool: @@ -134,27 +139,46 @@ async def send_audio( if self._closed: raise RuntimeError("Connection is closed") - message = { - "type": "input_audio.append", - "audio": base64.b64encode(bytes(audio_bytes)).decode("ascii"), - } - await self._websocket.send(json.dumps(message)) + message = RealtimeTranscriptionInputAudioAppend( + audio=base64.b64encode(bytes(audio_bytes)).decode("ascii") + ) + await self._websocket.send(message.model_dump_json()) - async def update_session(self, audio_format: AudioFormat) -> None: + async def flush_audio(self) -> None: if self._closed: raise RuntimeError("Connection is closed") + await self._websocket.send( + RealtimeTranscriptionInputAudioFlush().model_dump_json() + ) - self._audio_format = audio_format - message = { - "type": "session.update", 
- "session": {"audio_format": audio_format.model_dump(mode="json")}, - } - await self._websocket.send(json.dumps(message)) + async def update_session( + self, + audio_format: Optional[AudioFormat] = None, + *, + target_streaming_delay_ms: Optional[int] = None, + ) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + + if audio_format is None and target_streaming_delay_ms is None: + raise ValueError("At least one session field must be provided") + + message = RealtimeTranscriptionSessionUpdateMessage( + session=RealtimeTranscriptionSessionUpdatePayload( + audio_format=audio_format if audio_format is not None else UNSET, + target_streaming_delay_ms=target_streaming_delay_ms + if target_streaming_delay_ms is not None + else UNSET, + ) + ) + await self._websocket.send(message.model_dump_json()) async def end_audio(self) -> None: if self._closed: return - await self._websocket.send(json.dumps({"type": "input_audio.end"})) + await self._websocket.send( + RealtimeTranscriptionInputAudioEnd().model_dump_json() + ) async def close(self, *, code: int = 1000, reason: str = "") -> None: if self._closed: @@ -202,6 +226,7 @@ async def events(self) -> AsyncIterator[RealtimeEvent]: await self.close() def _apply_session_updates(self, ev: RealtimeEvent) -> None: - if isinstance(ev, RealtimeTranscriptionSessionCreated) or isinstance(ev, RealtimeTranscriptionSessionUpdated): + if isinstance(ev, RealtimeTranscriptionSessionCreated) or isinstance( + ev, RealtimeTranscriptionSessionUpdated + ): self._session = ev.session - self._audio_format = ev.session.audio_format diff --git a/src/mistralai/extra/realtime/transcription.py b/src/mistralai/extra/realtime/transcription.py index 655fd9c1..b216e676 100644 --- a/src/mistralai/extra/realtime/transcription.py +++ b/src/mistralai/extra/realtime/transcription.py @@ -67,6 +67,7 @@ async def connect( self, model: str, audio_format: Optional[AudioFormat] = None, + target_streaming_delay_ms: Optional[int] = None, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, @@ -122,8 +123,11 @@ async def connect( initial_events=initial_events, ) - if audio_format is not None: - await connection.update_session(audio_format) + if audio_format is not None or target_streaming_delay_ms is not None: + await connection.update_session( + audio_format, + target_streaming_delay_ms=target_streaming_delay_ms, + ) return connection @@ -141,6 +145,7 @@ async def transcribe_stream( audio_stream: AsyncIterator[bytes], model: str, audio_format: Optional[AudioFormat] = None, + target_streaming_delay_ms: Optional[int] = None, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, @@ -154,6 +159,7 @@ async def transcribe_stream( async with await self.connect( model=model, audio_format=audio_format, + target_streaming_delay_ms=target_streaming_delay_ms, server_url=server_url, timeout_ms=timeout_ms, http_headers=http_headers, @@ -164,6 +170,7 @@ async def _send() -> None: if connection.is_closed: break await connection.send_audio(chunk) + await connection.flush_audio() await connection.end_audio() send_task = asyncio.create_task(_send()) From 11babe1c440712a31d362c4a8c216bb8ac688d61 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Thu, 26 Feb 2026 16:25:52 +0100 Subject: [PATCH 33/42] fix: correct inaccurate migration guide documentation (#375) - Remove false "Type Name Changes" section (claimed renamings 
don't exist) - Remove false "Shorter Request/Response Class Names" section (long names still used) - Simplify installation section with accurate info - Add clarifying note for v2 users reading v0->v1 section --- MIGRATION.md | 33 +++++---------------------------- 1 file changed, 5 insertions(+), 28 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index 906173fe..fe5c8423 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -46,41 +46,16 @@ Azure and GCP SDKs now live under the `mistralai` namespace as separate distribu #### Installation Changes -| v1 | v2 | -|---|---| -| `pip install mistralai` | `pip install mistralai` (includes Azure and GCP) | -| `pip install mistralai[gcp]` (for GCP auth) | `pip install "mistralai[gcp]"` (for GCP auth dependencies) | +The main `mistralai` package now bundles Azure and GCP support. You can also install `mistralai-azure` or `mistralai-gcp` as standalone distributions. -Azure and GCP are now standalone distributions that can be installed independently of the core SDK. The `mistralai[azure]` and `mistralai[gcp]` extras are syntactic sugar that pull in the respective distributions. +For GCP authentication dependencies, use `pip install "mistralai[gcp]"`. ### What Stays the Same - The `Mistral` client API is unchanged - All models (`UserMessage`, `AssistantMessage`, etc.) work the same way -### Type Name Changes - -Some type names have been updated for clarity and consistency: - -| Old Name | New Name | -|---|---| -| `Tools` | `ConversationRequestTool` | -| `ToolsTypedDict` | `ConversationRequestToolTypedDict` | -| `HandoffExecution` | `ConversationRequestHandoffExecution` | -| `AgentVersion` | `ConversationRequestAgentVersion` | - -### Shorter Request/Response Class Names - -Internal request and response wrapper classes now use concise names: - -| Old Name | New Name | -|---|---| -| `JobsAPIRoutesFineTuningArchiveFineTunedModelRequest` | `ArchiveModelRequest` | -| `JobsAPIRoutesFineTuningCreateFineTuningJobResponse` | `CreateFineTuningJobResponse` | -| `FilesAPIRoutesUploadFileRequest` | `UploadFileRequest` | -| `AgentsAPIV1ConversationsAppendRequest` | `AppendConversationRequest` | - -This affects all operation-specific request/response types. Core models like `UserMessage`, `ChatCompletionRequest`, etc. are unchanged. +### Enums Enums now accept unknown values for forward compatibility with API changes. @@ -90,6 +65,8 @@ Enums now accept unknown values for forward compatibility with API changes. Version 1.0 introduced significant changes to improve usability and consistency. +> **Note:** The v1.x examples below use v1-style imports (e.g., `from mistralai import Mistral`). If you're on v2.x, combine these API changes with the [v1 to v2 import changes](#migrating-from-v1x-to-v2x) above. + ### Major Changes 1. **Unified Client Class**: `MistralClient` and `MistralAsyncClient` consolidated into a single `Mistral` class From ac5195f601fd1f232e009f940fe3fc71324b0426 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Thu, 26 Feb 2026 17:11:38 +0100 Subject: [PATCH 34/42] fix: remove inaccurate installation claims in migration docs (#378) Removes incorrect statements about Azure/GCP bundling changes, since the installation behavior hasn't changed between versions. 
--- MIGRATION.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index fe5c8423..2fc3d13d 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -46,8 +46,6 @@ Azure and GCP SDKs now live under the `mistralai` namespace as separate distribu #### Installation Changes -The main `mistralai` package now bundles Azure and GCP support. You can also install `mistralai-azure` or `mistralai-gcp` as standalone distributions. - For GCP authentication dependencies, use `pip install "mistralai[gcp]"`. ### What Stays the Same From 7a1a8ac90fb4945491de49677877538193af8c6b Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 09:47:19 +0100 Subject: [PATCH 35/42] feat: align pyproject.toml version with gen.lock in SDK generation (#383) * feat: add workflow to align pyproject.toml version with gen.lock Add a PR-triggered workflow that automatically updates pyproject.toml version to match gen.lock when speakeasybot creates/updates a PR. This fixes version mismatch between gen.lock and pyproject.toml caused by pyproject.toml being in .genignore (Speakeasy cannot update it). The workflow: - Triggers on PR events when gen.lock files change - Only runs for PRs from speakeasybot - Reads releaseVersion from gen.lock - Updates pyproject.toml using uv version - Commits and pushes the change * fix: strip quotes from version extraction in align workflow Handle potential quoted YAML values in gen.lock releaseVersion field. --- .../workflows/align_pyproject_version.yaml | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .github/workflows/align_pyproject_version.yaml diff --git a/.github/workflows/align_pyproject_version.yaml b/.github/workflows/align_pyproject_version.yaml new file mode 100644 index 00000000..0f6d7463 --- /dev/null +++ b/.github/workflows/align_pyproject_version.yaml @@ -0,0 +1,58 @@ +name: Align pyproject.toml version + +on: + pull_request: + types: [opened, synchronize] + paths: + - ".speakeasy/gen.lock" + - "packages/azure/.speakeasy/gen.lock" + - "packages/gcp/.speakeasy/gen.lock" + +permissions: + contents: write + +jobs: + align-version: + if: github.actor == 'speakeasybot' + runs-on: ubuntu-latest + steps: + - name: Checkout PR branch + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + with: + ref: ${{ github.head_ref }} + + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Align main SDK version + if: hashFiles('.speakeasy/gen.lock') != '' + run: | + VERSION=$(grep 'releaseVersion:' .speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + echo "Aligning main SDK to version: $VERSION" + uv version "$VERSION" + + - name: Align Azure SDK version + if: hashFiles('packages/azure/.speakeasy/gen.lock') != '' + run: | + VERSION=$(grep 'releaseVersion:' packages/azure/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + echo "Aligning Azure SDK to version: $VERSION" + uv version "$VERSION" --directory packages/azure + + - name: Align GCP SDK version + if: hashFiles('packages/gcp/.speakeasy/gen.lock') != '' + run: | + VERSION=$(grep 'releaseVersion:' packages/gcp/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + echo "Aligning GCP SDK to version: $VERSION" + uv version "$VERSION" --directory packages/gcp + + - name: Commit and push + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add pyproject.toml packages/*/pyproject.toml 2>/dev/null || 
true + if git diff --cached --quiet; then + echo "No version change needed" + else + git commit -m "chore: align pyproject.toml version with gen.lock" + git push + fi From e2cdc231291a20de4e300c932f036c66a08b210e Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 10:51:48 +0100 Subject: [PATCH 36/42] fix: check for github-actions[bot] instead of speakeasybot (#387) Speakeasy PRs are authored by github-actions[bot], not speakeasybot. --- .github/workflows/align_pyproject_version.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/align_pyproject_version.yaml b/.github/workflows/align_pyproject_version.yaml index 0f6d7463..aeea2096 100644 --- a/.github/workflows/align_pyproject_version.yaml +++ b/.github/workflows/align_pyproject_version.yaml @@ -13,7 +13,7 @@ permissions: jobs: align-version: - if: github.actor == 'speakeasybot' + if: github.actor == 'github-actions[bot]' runs-on: ubuntu-latest steps: - name: Checkout PR branch From ed618e8a432af423800e698c474d62e7c21d78a3 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:25:21 +0100 Subject: [PATCH 37/42] fix: use workflow_run trigger for version alignment (#390) * fix: use workflow_run trigger instead of pull_request GitHub Actions using GITHUB_TOKEN don't trigger pull_request workflows (security measure to prevent infinite loops). Using workflow_run instead triggers when Speakeasy generation completes, then checks out the PR branch. * fix: restrict to speakeasy branches to address CodeQL warning * fix: use GitHub API instead of checkout to avoid CodeQL warning No checkout means no untrusted code execution. Uses gh api to: - Fetch gen.lock content from PR branch - Fetch and update pyproject.toml - Commit changes via API * refactor: extract reusable function and improve error handling - Extract align_version() function to eliminate code duplication - Add proper error handling with set -euo pipefail - Use ::group:: for cleaner logs - Continue processing all SDKs even if one fails - Track exit code to fail workflow if any SDK fails --- .../workflows/align_pyproject_version.yaml | 117 +++++++++++------- 1 file changed, 75 insertions(+), 42 deletions(-) diff --git a/.github/workflows/align_pyproject_version.yaml b/.github/workflows/align_pyproject_version.yaml index aeea2096..77a5b9b5 100644 --- a/.github/workflows/align_pyproject_version.yaml +++ b/.github/workflows/align_pyproject_version.yaml @@ -1,58 +1,91 @@ name: Align pyproject.toml version on: - pull_request: - types: [opened, synchronize] - paths: - - ".speakeasy/gen.lock" - - "packages/azure/.speakeasy/gen.lock" - - "packages/gcp/.speakeasy/gen.lock" + workflow_run: + workflows: + - "Generate MISTRALAI" + - "Generate MISTRAL-PYTHON-SDK-AZURE" + - "Generate MISTRAL-PYTHON-SDK-GOOGLE-CLOUD" + types: [completed] permissions: contents: write jobs: align-version: - if: github.actor == 'github-actions[bot]' + if: github.event.workflow_run.conclusion == 'success' runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} + BRANCH: ${{ github.event.workflow_run.head_branch }} + REPO: ${{ github.repository }} steps: - - name: Checkout PR branch - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ github.head_ref }} + - name: Align SDK versions + run: | + set -euo pipefail - - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + align_version() 
{ + local gen_lock_path="$1" + local pyproject_path="$2" + local sdk_name="$3" - - name: Align main SDK version - if: hashFiles('.speakeasy/gen.lock') != '' - run: | - VERSION=$(grep 'releaseVersion:' .speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') - echo "Aligning main SDK to version: $VERSION" - uv version "$VERSION" + echo "::group::Aligning $sdk_name" - - name: Align Azure SDK version - if: hashFiles('packages/azure/.speakeasy/gen.lock') != '' - run: | - VERSION=$(grep 'releaseVersion:' packages/azure/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') - echo "Aligning Azure SDK to version: $VERSION" - uv version "$VERSION" --directory packages/azure + # Fetch gen.lock from PR branch via API + if ! GEN_LOCK=$(gh api "repos/$REPO/contents/$gen_lock_path?ref=$BRANCH" --jq '.content' 2>/dev/null | base64 -d); then + echo "No gen.lock found at $gen_lock_path, skipping" + echo "::endgroup::" + return 0 + fi - - name: Align GCP SDK version - if: hashFiles('packages/gcp/.speakeasy/gen.lock') != '' - run: | - VERSION=$(grep 'releaseVersion:' packages/gcp/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') - echo "Aligning GCP SDK to version: $VERSION" - uv version "$VERSION" --directory packages/gcp + VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in $gen_lock_path" + echo "::endgroup::" + return 0 + fi + echo "Found version: $VERSION" - - name: Commit and push - run: | - git config user.email "action@github.com" - git config user.name "GitHub Action" - git add pyproject.toml packages/*/pyproject.toml 2>/dev/null || true - if git diff --cached --quiet; then - echo "No version change needed" - else - git commit -m "chore: align pyproject.toml version with gen.lock" - git push - fi + # Fetch current pyproject.toml + if ! PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/$pyproject_path?ref=$BRANCH" 2>/dev/null); then + echo "Failed to fetch $pyproject_path" + echo "::endgroup::" + return 1 + fi + + CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') + PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) + + # Update version in pyproject.toml + UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") + + if [ "$PYPROJECT" = "$UPDATED" ]; then + echo "Version already aligned to $VERSION" + echo "::endgroup::" + return 0 + fi + + # Commit updated file via API + ENCODED=$(echo "$UPDATED" | base64 -w 0) + if ! gh api "repos/$REPO/contents/$pyproject_path" \ + -X PUT \ + -f message="chore: align $sdk_name pyproject.toml version with gen.lock" \ + -f content="$ENCODED" \ + -f sha="$CURRENT_SHA" \ + -f branch="$BRANCH" > /dev/null; then + echo "Failed to commit $pyproject_path (may have been updated by another job)" + echo "::endgroup::" + return 1 + fi + + echo "Updated $pyproject_path to version $VERSION" + echo "::endgroup::" + } + + # Align all SDKs (continue on failure to attempt all) + EXIT_CODE=0 + align_version ".speakeasy/gen.lock" "pyproject.toml" "main SDK" || EXIT_CODE=$? + align_version "packages/azure/.speakeasy/gen.lock" "packages/azure/pyproject.toml" "Azure SDK" || EXIT_CODE=$? + align_version "packages/gcp/.speakeasy/gen.lock" "packages/gcp/pyproject.toml" "GCP SDK" || EXIT_CODE=$? 
+ + exit $EXIT_CODE From 176b844dd57b8451b6327dacf3a3432224594b9f Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:49:19 +0100 Subject: [PATCH 38/42] fix: align pyproject.toml version directly in generation workflows (#392) Instead of using a separate workflow with workflow_run trigger (which runs on the wrong branch), embed the version alignment logic directly in each SDK generation workflow as a job that runs after generation. This approach works because: - Same workflow = same GITHUB_TOKEN permissions - needs: generate ensures it runs after PR is created - No trigger gymnastics or branch detection issues Deletes the standalone align_pyproject_version.yaml workflow. --- .../workflows/align_pyproject_version.yaml | 91 ------------------- .../sdk_generation_mistralai_azure_sdk.yaml | 58 ++++++++++++ .../sdk_generation_mistralai_gcp_sdk.yaml | 58 ++++++++++++ .../sdk_generation_mistralai_sdk.yaml | 58 ++++++++++++ 4 files changed, 174 insertions(+), 91 deletions(-) delete mode 100644 .github/workflows/align_pyproject_version.yaml diff --git a/.github/workflows/align_pyproject_version.yaml b/.github/workflows/align_pyproject_version.yaml deleted file mode 100644 index 77a5b9b5..00000000 --- a/.github/workflows/align_pyproject_version.yaml +++ /dev/null @@ -1,91 +0,0 @@ -name: Align pyproject.toml version - -on: - workflow_run: - workflows: - - "Generate MISTRALAI" - - "Generate MISTRAL-PYTHON-SDK-AZURE" - - "Generate MISTRAL-PYTHON-SDK-GOOGLE-CLOUD" - types: [completed] - -permissions: - contents: write - -jobs: - align-version: - if: github.event.workflow_run.conclusion == 'success' - runs-on: ubuntu-latest - env: - GH_TOKEN: ${{ github.token }} - BRANCH: ${{ github.event.workflow_run.head_branch }} - REPO: ${{ github.repository }} - steps: - - name: Align SDK versions - run: | - set -euo pipefail - - align_version() { - local gen_lock_path="$1" - local pyproject_path="$2" - local sdk_name="$3" - - echo "::group::Aligning $sdk_name" - - # Fetch gen.lock from PR branch via API - if ! GEN_LOCK=$(gh api "repos/$REPO/contents/$gen_lock_path?ref=$BRANCH" --jq '.content' 2>/dev/null | base64 -d); then - echo "No gen.lock found at $gen_lock_path, skipping" - echo "::endgroup::" - return 0 - fi - - VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') - if [ -z "$VERSION" ]; then - echo "No releaseVersion found in $gen_lock_path" - echo "::endgroup::" - return 0 - fi - echo "Found version: $VERSION" - - # Fetch current pyproject.toml - if ! PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/$pyproject_path?ref=$BRANCH" 2>/dev/null); then - echo "Failed to fetch $pyproject_path" - echo "::endgroup::" - return 1 - fi - - CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') - PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) - - # Update version in pyproject.toml - UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") - - if [ "$PYPROJECT" = "$UPDATED" ]; then - echo "Version already aligned to $VERSION" - echo "::endgroup::" - return 0 - fi - - # Commit updated file via API - ENCODED=$(echo "$UPDATED" | base64 -w 0) - if ! 
gh api "repos/$REPO/contents/$pyproject_path" \ - -X PUT \ - -f message="chore: align $sdk_name pyproject.toml version with gen.lock" \ - -f content="$ENCODED" \ - -f sha="$CURRENT_SHA" \ - -f branch="$BRANCH" > /dev/null; then - echo "Failed to commit $pyproject_path (may have been updated by another job)" - echo "::endgroup::" - return 1 - fi - - echo "Updated $pyproject_path to version $VERSION" - echo "::endgroup::" - } - - # Align all SDKs (continue on failure to attempt all) - EXIT_CODE=0 - align_version ".speakeasy/gen.lock" "pyproject.toml" "main SDK" || EXIT_CODE=$? - align_version "packages/azure/.speakeasy/gen.lock" "packages/azure/pyproject.toml" "Azure SDK" || EXIT_CODE=$? - align_version "packages/gcp/.speakeasy/gen.lock" "packages/gcp/pyproject.toml" "GCP SDK" || EXIT_CODE=$? - - exit $EXIT_CODE diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index 22af64aa..b4c2e908 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -27,3 +27,61 @@ jobs: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Align pyproject.toml version with gen.lock + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + run: | + set -euo pipefail + + # Find the most recent PR created by github-actions bot + PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + + if [ -z "$PR_BRANCH" ]; then + echo "No PR found from github-actions bot, skipping" + exit 0 + fi + echo "Found PR branch: $PR_BRANCH" + + # Fetch gen.lock from PR branch + if ! 
GEN_LOCK=$(gh api "repos/$REPO/contents/packages/azure/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then + echo "No gen.lock found, skipping" + exit 0 + fi + + VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + # Fetch current pyproject.toml + PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/packages/azure/pyproject.toml?ref=$PR_BRANCH") + CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') + PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) + + # Update version + UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") + + if [ "$PYPROJECT" = "$UPDATED" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + # Commit updated file + ENCODED=$(echo "$UPDATED" | base64 -w 0) + gh api "repos/$REPO/contents/packages/azure/pyproject.toml" \ + -X PUT \ + -f message="chore: align Azure pyproject.toml version to $VERSION" \ + -f content="$ENCODED" \ + -f sha="$CURRENT_SHA" \ + -f branch="$PR_BRANCH" + + echo "Updated packages/azure/pyproject.toml to version $VERSION" diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index bf1d19b1..5a6cbe71 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -27,3 +27,61 @@ jobs: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Align pyproject.toml version with gen.lock + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + run: | + set -euo pipefail + + # Find the most recent PR created by github-actions bot + PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + + if [ -z "$PR_BRANCH" ]; then + echo "No PR found from github-actions bot, skipping" + exit 0 + fi + echo "Found PR branch: $PR_BRANCH" + + # Fetch gen.lock from PR branch + if ! 
GEN_LOCK=$(gh api "repos/$REPO/contents/packages/gcp/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then + echo "No gen.lock found, skipping" + exit 0 + fi + + VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + # Fetch current pyproject.toml + PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/packages/gcp/pyproject.toml?ref=$PR_BRANCH") + CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') + PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) + + # Update version + UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") + + if [ "$PYPROJECT" = "$UPDATED" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + # Commit updated file + ENCODED=$(echo "$UPDATED" | base64 -w 0) + gh api "repos/$REPO/contents/packages/gcp/pyproject.toml" \ + -X PUT \ + -f message="chore: align GCP pyproject.toml version to $VERSION" \ + -f content="$ENCODED" \ + -f sha="$CURRENT_SHA" \ + -f branch="$PR_BRANCH" + + echo "Updated packages/gcp/pyproject.toml to version $VERSION" diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index cbe8f1e8..37b8a523 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -27,3 +27,61 @@ jobs: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Align pyproject.toml version with gen.lock + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + run: | + set -euo pipefail + + # Find the most recent PR created by github-actions bot + PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + + if [ -z "$PR_BRANCH" ]; then + echo "No PR found from github-actions bot, skipping" + exit 0 + fi + echo "Found PR branch: $PR_BRANCH" + + # Fetch gen.lock from PR branch + if ! 
GEN_LOCK=$(gh api "repos/$REPO/contents/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then + echo "No gen.lock found, skipping" + exit 0 + fi + + VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + # Fetch current pyproject.toml + PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/pyproject.toml?ref=$PR_BRANCH") + CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') + PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) + + # Update version + UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") + + if [ "$PYPROJECT" = "$UPDATED" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + # Commit updated file + ENCODED=$(echo "$UPDATED" | base64 -w 0) + gh api "repos/$REPO/contents/pyproject.toml" \ + -X PUT \ + -f message="chore: align pyproject.toml version to $VERSION" \ + -f content="$ENCODED" \ + -f sha="$CURRENT_SHA" \ + -f branch="$PR_BRANCH" + + echo "Updated pyproject.toml to version $VERSION" From b5f8c81946bcf2b82b0baca8b934097f553fad0f Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 16:09:05 +0100 Subject: [PATCH 39/42] fix: use checkout + uv version to also update uv.lock (#393) The API-only approach only updated pyproject.toml but not uv.lock. This version: - Checks out the PR branch (safe for workflow_dispatch context) - Uses `uv version` to update both pyproject.toml and uv.lock - Commits and pushes the changes Security note: checkout is safe here because: - workflow_dispatch trigger (not pull_request_target) - Branch is in same repo, not a fork - Branch was just created by Speakeasy in the same workflow run --- .../sdk_generation_mistralai_azure_sdk.yaml | 70 +++++++++---------- .../sdk_generation_mistralai_gcp_sdk.yaml | 70 +++++++++---------- .../sdk_generation_mistralai_sdk.yaml | 70 +++++++++---------- 3 files changed, 105 insertions(+), 105 deletions(-) diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index b4c2e908..b5d0741b 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -32,56 +32,56 @@ jobs: needs: generate runs-on: ubuntu-latest steps: - - name: Align pyproject.toml version with gen.lock + - name: Find PR branch + id: find-pr env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO: ${{ github.repository }} run: | - set -euo pipefail - - # Find the most recent PR created by github-actions bot - PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT - if [ -z "$PR_BRANCH" ]; then - echo "No PR found from github-actions bot, skipping" - exit 0 - fi - echo "Found PR branch: $PR_BRANCH" + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} - # Fetch gen.lock from PR branch - if ! 
GEN_LOCK=$(gh api "repos/$REPO/contents/packages/azure/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then - echo "No gen.lock found, skipping" - exit 0 - fi + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' packages/azure/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') if [ -z "$VERSION" ]; then echo "No releaseVersion found in gen.lock" exit 0 fi echo "Found version: $VERSION" - # Fetch current pyproject.toml - PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/packages/azure/pyproject.toml?ref=$PR_BRANCH") - CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') - PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) - - # Update version - UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") - - if [ "$PYPROJECT" = "$UPDATED" ]; then + CURRENT=$(grep '^version = ' packages/azure/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then echo "Version already aligned to $VERSION" exit 0 fi - # Commit updated file - ENCODED=$(echo "$UPDATED" | base64 -w 0) - gh api "repos/$REPO/contents/packages/azure/pyproject.toml" \ - -X PUT \ - -f message="chore: align Azure pyproject.toml version to $VERSION" \ - -f content="$ENCODED" \ - -f sha="$CURRENT_SHA" \ - -f branch="$PR_BRANCH" + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" --directory packages/azure - echo "Updated packages/azure/pyproject.toml to version $VERSION" + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add packages/azure/pyproject.toml packages/azure/uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' packages/azure/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align Azure pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index 5a6cbe71..05f88e25 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -32,56 +32,56 @@ jobs: needs: generate runs-on: ubuntu-latest steps: - - name: Align pyproject.toml version with gen.lock + - name: Find PR branch + id: find-pr env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO: ${{ github.repository }} run: | - set -euo pipefail - - # Find the most recent PR created by github-actions bot - PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT - if [ -z "$PR_BRANCH" ]; then - echo "No PR found from github-actions bot, skipping" - exit 0 - fi - echo "Found PR branch: $PR_BRANCH" + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch 
}} - # Fetch gen.lock from PR branch - if ! GEN_LOCK=$(gh api "repos/$REPO/contents/packages/gcp/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then - echo "No gen.lock found, skipping" - exit 0 - fi + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' packages/gcp/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') if [ -z "$VERSION" ]; then echo "No releaseVersion found in gen.lock" exit 0 fi echo "Found version: $VERSION" - # Fetch current pyproject.toml - PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/packages/gcp/pyproject.toml?ref=$PR_BRANCH") - CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') - PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) - - # Update version - UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") - - if [ "$PYPROJECT" = "$UPDATED" ]; then + CURRENT=$(grep '^version = ' packages/gcp/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then echo "Version already aligned to $VERSION" exit 0 fi - # Commit updated file - ENCODED=$(echo "$UPDATED" | base64 -w 0) - gh api "repos/$REPO/contents/packages/gcp/pyproject.toml" \ - -X PUT \ - -f message="chore: align GCP pyproject.toml version to $VERSION" \ - -f content="$ENCODED" \ - -f sha="$CURRENT_SHA" \ - -f branch="$PR_BRANCH" + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" --directory packages/gcp - echo "Updated packages/gcp/pyproject.toml to version $VERSION" + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add packages/gcp/pyproject.toml packages/gcp/uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' packages/gcp/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align GCP pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 37b8a523..59fe1150 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -32,56 +32,56 @@ jobs: needs: generate runs-on: ubuntu-latest steps: - - name: Align pyproject.toml version with gen.lock + - name: Find PR branch + id: find-pr env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO: ${{ github.repository }} run: | - set -euo pipefail - - # Find the most recent PR created by github-actions bot - PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT - if [ -z "$PR_BRANCH" ]; then - echo "No PR found from github-actions bot, skipping" - exit 0 - fi - echo "Found PR branch: $PR_BRANCH" + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ 
steps.find-pr.outputs.branch }} - # Fetch gen.lock from PR branch - if ! GEN_LOCK=$(gh api "repos/$REPO/contents/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then - echo "No gen.lock found, skipping" - exit 0 - fi + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' .speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') if [ -z "$VERSION" ]; then echo "No releaseVersion found in gen.lock" exit 0 fi echo "Found version: $VERSION" - # Fetch current pyproject.toml - PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/pyproject.toml?ref=$PR_BRANCH") - CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') - PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) - - # Update version - UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") - - if [ "$PYPROJECT" = "$UPDATED" ]; then + CURRENT=$(grep '^version = ' pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then echo "Version already aligned to $VERSION" exit 0 fi - # Commit updated file - ENCODED=$(echo "$UPDATED" | base64 -w 0) - gh api "repos/$REPO/contents/pyproject.toml" \ - -X PUT \ - -f message="chore: align pyproject.toml version to $VERSION" \ - -f content="$ENCODED" \ - -f sha="$CURRENT_SHA" \ - -f branch="$PR_BRANCH" + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" - echo "Updated pyproject.toml to version $VERSION" + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add pyproject.toml uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align pyproject.toml and uv.lock to version $VERSION" + git push + fi From 79b53d81907eebba35a4d15cf313bd4583d153bf Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 13:43:50 +0100 Subject: [PATCH 40/42] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20-?= =?UTF-8?q?=20Generate=20MISTRALAI=20MISTRALAI-SDK=202.0.0rc1=20(#394)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ## Python SDK Changes: * `mistral.beta.libraries.documents.list()`: `response.data[].process_status` **Added** * `mistral.beta.libraries.documents.upload()`: `response.process_status` **Added** * `mistral.beta.libraries.documents.get()`: `response.process_status` **Added** * `mistral.beta.libraries.documents.update()`: `response.process_status` **Added** * `mistral.beta.libraries.documents.status()`: `response.process_status` **Added** * chore: align pyproject.toml and uv.lock to version 2.0.0rc1 --------- Co-authored-by: speakeasybot Co-authored-by: GitHub Action --- .speakeasy/gen.lock | 81 +++++++++++-------- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +-- RELEASES.md | 12 ++- docs/models/document.md | 1 + docs/models/embeddingrequest.md | 4 +- docs/models/embeddingrequestinputs.md | 2 +- docs/models/processingstatusout.md | 9 ++- docs/models/processstatus.md | 15 ++++ 
docs/sdks/embeddings/README.md | 4 +- pyproject.toml | 2 +- src/mistralai/client/_version.py | 4 +- src/mistralai/client/embeddings.py | 8 +- src/mistralai/client/models/__init__.py | 3 + src/mistralai/client/models/document.py | 4 + .../client/models/embeddingrequest.py | 12 +-- .../client/models/processingstatusout.py | 4 + src/mistralai/client/models/processstatus.py | 21 +++++ uv.lock | 2 +- 19 files changed, 138 insertions(+), 64 deletions(-) create mode 100644 docs/models/processstatus.md create mode 100644 src/mistralai/client/models/processstatus.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 7314b7b1..8e89c12b 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,19 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: b66b034aac7aa9b38c4fb47a3b3d843e + docChecksum: 9ea68a20ee2ef4565df16947f204034b docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0b1 - configChecksum: 871b5a7d3687bd2a9ebd0e205e4b36a3 + releaseVersion: 2.0.0rc1 + configChecksum: ba30d47e402a93dc30b5001c33116a3d repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 1527268d-25cf-4a8c-8a67-09694eaf0d79 - pristine_commit_hash: 5642b69da5a9a00af1e84ca689b7587f2269d0c4 - pristine_tree_hash: f9eae7c82e85b3114e342a4b6500b9704a266493 + generation_id: 92ab8a00-49e7-471b-bca6-d18f761863df + pristine_commit_hash: 5eb9662433e80c22603fb3a3bf921f6b285fa2d4 + pristine_tree_hash: 9e781b9b07960a689815c5fa6008765ae4a60716 features: python: additionalDependencies: 1.0.0 @@ -643,8 +643,8 @@ trackedFiles: pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/document.md: id: cd1d2a444370 - last_write_checksum: sha1:77076e66dea6f4582e73ecc5a55ef750f026448a - pristine_git_object: 284babb98fbb0279bef2626fa18eada0035572c5 + last_write_checksum: sha1:c10641b02547bedcc982b8997097083dfc562598 + pristine_git_object: 42c639a6a9b235ab4c754286486fa7e8872a0e7e docs/models/documentlibrarytool.md: id: 68083b0ef8f3 last_write_checksum: sha1:76b9f47c399915a338abe929cb10c1b37282eadf @@ -671,12 +671,12 @@ trackedFiles: pristine_git_object: 01656b0a85aa87f19909b18100bb6981f89683fc docs/models/embeddingrequest.md: id: bebee24421b4 - last_write_checksum: sha1:8e2bfa35f55b55f83fa2ebf7bee28cd00cb681d1 - pristine_git_object: 7269c0551a0c1040693eafdd99e1b8ebe98478a5 + last_write_checksum: sha1:087230e81cfbbc539edc7cc1c0a490728276d217 + pristine_git_object: 71d139cdf5c556a1224d707be70f3fabe032fc27 docs/models/embeddingrequestinputs.md: id: 6a35f3b1910a - last_write_checksum: sha1:e12ca056fac504e5af06a304d09154d3ecd17919 - pristine_git_object: 527a089b38b5cd316173ced4dc74a1429c8e4406 + last_write_checksum: sha1:f3bf6b89f279f59010124aa402e282c7c691eb03 + pristine_git_object: a3f82c1c67c726d3ef8e5e5ea5513386acc7c2f4 docs/models/embeddingresponse.md: id: 31cd0f6b7bb5 last_write_checksum: sha1:1d7351c68b075aba8e91e53d29bdab3c6dd5c3a2 @@ -1175,8 +1175,12 @@ trackedFiles: pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 docs/models/processingstatusout.md: id: 83c8c59c1802 - last_write_checksum: sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c - pristine_git_object: 7b67583f4209778ac6f945631c0ee03ba1f4c663 + last_write_checksum: sha1:7dbbfe790616ab4388e532bd78ffc1a5183b332d + pristine_git_object: 
bc40d3209c4c641dd7416c925b965c1bf7b73b1b + docs/models/processstatus.md: + id: "336054835357" + last_write_checksum: sha1:9b87de1980428307af6c29c2086c0e1f612ebd72 + pristine_git_object: 3a9c004e55cc31abb52d1f0bb450290465d42a1c docs/models/realtimetranscriptionerror.md: id: 4bc5e819565b last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6 @@ -1551,8 +1555,8 @@ trackedFiles: pristine_git_object: 9c219b6709d5d5bfa28113efca92012e8c5a5112 docs/sdks/embeddings/README.md: id: 15b5b04486c1 - last_write_checksum: sha1:76cb4876eebccfd2ab9a10a1b25570477a96a5c1 - pristine_git_object: eecb5c9e991dcd2fd5c1f0688efe3b64b4c6de3b + last_write_checksum: sha1:4a279bf9bcd84a9878ef979c78b8b75af3d52f02 + pristine_git_object: cb207d8be2ca86b00dc797fc06eabd1498adb770 docs/sdks/files/README.md: id: e576d7a117f0 last_write_checksum: sha1:f5861c42227b901742fd8afe7155ed6d634b1b4c @@ -1607,8 +1611,8 @@ trackedFiles: pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:f5109c91723cc927e8513ac9e637512edd91f04e - pristine_git_object: ab2cf01d06f4d4373b52373795db76aa40f00ceb + last_write_checksum: sha1:a48c2cc43ac028eb2e31a129a9551ad2fda3d33f + pristine_git_object: 805648e42e48831658907f664d6536e8bdcd98c0 src/mistralai/client/accesses.py: id: 76fc53bfcf59 last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e @@ -1659,8 +1663,8 @@ trackedFiles: pristine_git_object: b3130364c0f3cc90ed1e4407a070bd99e3cce606 src/mistralai/client/embeddings.py: id: f9c17258207e - last_write_checksum: sha1:0fbf92b59fde3199c770a522ead030f8fa65ff5c - pristine_git_object: 5f9d3b9cb611943e509caeda9ddd175e3baee2c3 + last_write_checksum: sha1:d1610bf12dba8b2f8cb27d2f0aa592594dfe6b3a + pristine_git_object: 5d55ffc43c0c98d46e04b238ab23a08d1b9e6a6a src/mistralai/client/errors/__init__.py: id: 0b2db51246df last_write_checksum: sha1:0befddc505c9c47388683126750c7ad0e3fbef52 @@ -1711,8 +1715,8 @@ trackedFiles: pristine_git_object: b8728362b87349118ac6f163f50613dd18c43340 src/mistralai/client/models/__init__.py: id: e0e8dad92725 - last_write_checksum: sha1:50727667552480e8298431f5a3dcc78457c53331 - pristine_git_object: 5ef8b3f3dd9fbb32d4675f7e11808c29fc218c57 + last_write_checksum: sha1:0ac0c956f0f87979e871a00c32884ee3102b6d2b + pristine_git_object: 7d2dfd970d48d54d798f1661206abdc697134434 src/mistralai/client/models/agent.py: id: 1336849c84fb last_write_checksum: sha1:6090ddf2b5b40656dfbf3325f1022a40ae418948 @@ -2075,8 +2079,8 @@ trackedFiles: pristine_git_object: d9fa230e93d4e0886f21c836cf3813855eb8f9fd src/mistralai/client/models/document.py: id: fbbf7428328c - last_write_checksum: sha1:2a5a28c54f0aec50059b6badc1001b1cd120e7d3 - pristine_git_object: 31eebbd1a7d7fdcb498259837c533bfc8008a6f9 + last_write_checksum: sha1:db2e184f7cc97b24e7ec80887c35c32222afd8a8 + pristine_git_object: fcc5bca541a814143b0489528fd20a69004104b1 src/mistralai/client/models/documentlibrarytool.py: id: 3eb3c218f457 last_write_checksum: sha1:d03a6136192b56778bd739d834d9bdc80a09cc23 @@ -2095,8 +2099,8 @@ trackedFiles: pristine_git_object: 732c4ebe3678563ebcdbafd519f93317261586fb src/mistralai/client/models/embeddingrequest.py: id: eadbe3f9040c - last_write_checksum: sha1:e36282eb015b782804b4bdf3d18b596607b020fd - pristine_git_object: 15950590fec8b82a4fb28d69009a6f6cfb83c9ee + last_write_checksum: sha1:3ef4e321a698c4a10389280f8b1c7d0da20f4faf + pristine_git_object: 5fa2d2f63796523e58e281f2d3bcc05a92111842 src/mistralai/client/models/embeddingresponse.py: id: f7d790e84b65 
last_write_checksum: sha1:9bb53a5a860c8e10d4d504648d84da73068c0a83 @@ -2503,8 +2507,12 @@ trackedFiles: pristine_git_object: 0c6f4182ca8140e595f601b12fbd582034257587 src/mistralai/client/models/processingstatusout.py: id: 3df842c4140f - last_write_checksum: sha1:007a476e4101cac4d2a9eef94d289f0f486d763a - pristine_git_object: 3acadcc9792c286cd31031a80e108b74bc2c0c4e + last_write_checksum: sha1:d5acc98adcfc76cdc4fc26e090ecfc4d7835a438 + pristine_git_object: ed2a4f22dcffe787ce69bb9c6011a95216cf3928 + src/mistralai/client/models/processstatus.py: + id: "0205512146e6" + last_write_checksum: sha1:22ad3d5fc80fbf3f83db61512e7bc79295c5fc91 + pristine_git_object: 15bdce2056f2f642096fbbdb2ac32ce1e5b8c9cf src/mistralai/client/models/realtimetranscriptionerror.py: id: 8c2267378f48 last_write_checksum: sha1:78637de61d6fc3bc1fff8e95c0a6f5ffc1a3e111 @@ -3463,7 +3471,7 @@ examples: sort_order: "desc" responses: "200": - application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 957230}]} + application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "process_status": "noop", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 957230}]} "422": application/json: {} libraries_documents_upload_v1: @@ -3475,7 +3483,7 @@ examples: multipart/form-data: {"file": "x-file: example.file"} responses: "200": - application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 61161} + application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "process_status": "in_progress", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 61161} "422": application/json: {} libraries_documents_get_v1: @@ -3486,7 +3494,7 @@ examples: document_id: "90973aec-0508-4375-8b00-91d732414745" responses: "200": - application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 793889} + application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", 
"library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "process_status": "self_managed", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 793889} "422": application/json: {} libraries_documents_update_v1: @@ -3499,7 +3507,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 806683} + application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "process_status": "missing_content", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 806683} "422": application/json: {} libraries_documents_delete_v1: @@ -3530,7 +3538,7 @@ examples: document_id: "2c904915-d831-4e9d-a345-8ce405bcef66" responses: "200": - application/json: {"document_id": "90473b79-1fd5-437f-bee0-6638bdf69c90", "processing_status": ""} + application/json: {"document_id": "90473b79-1fd5-437f-bee0-6638bdf69c90", "process_status": "waiting_for_capacity", "processing_status": ""} "422": application/json: {} libraries_documents_get_signed_url_v1: @@ -4302,6 +4310,13 @@ examples: application/json: {} examplesVersion: 1.0.2 generatedTests: {} +releaseNotes: | + ## Python SDK Changes: + * `mistral.beta.libraries.documents.list()`: `response.data[].process_status` **Added** + * `mistral.beta.libraries.documents.upload()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.get()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.update()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.status()`: `response.process_status` **Added** generatedFiles: - .gitattributes - .vscode/settings.json diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index e237388a..1c82d91a 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -31,7 +31,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0b1 + version: 2.0.0rc1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index b26cdf2b..2b919f8b 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -16,11 +16,11 @@ sources: - speakeasy-sdk-regen-1772041030 mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 - sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 + sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 + sourceBlobDigest: sha256:7eb63e6d0b2226456aad34b5ae9edd75cc8e015643d478c09b717852e2852065 tags: - latest - - speakeasy-sdk-regen-1772040743 + - speakeasy-sdk-regen-1772205200 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -39,10 +39,10 @@ targets: mistralai-sdk: source: mistral-openapi 
sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 - sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 + sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 + sourceBlobDigest: sha256:7eb63e6d0b2226456aad34b5ae9edd75cc8e015643d478c09b717852e2852065 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:0bcecf3d1523375a194d6aa13116ffba291da8321e44b01399ae5e24f7ce2e33 + codeSamplesRevisionDigest: sha256:534088a1428d166f80e9669ec6bc67d277e22113c745ef8904789f0c6e6381d9 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.729.0 diff --git a/RELEASES.md b/RELEASES.md index 48b65760..1a631692 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -378,4 +378,14 @@ Based on: ### Generated - [python v2.0.0b1] . ### Releases -- [PyPI v2.0.0b1] https://pypi.org/project/mistralai/2.0.0b1 - . \ No newline at end of file +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai/2.0.0b1 - . + +## 2026-02-27 15:12:59 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0rc1] . +### Releases +- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai/2.0.0rc1 - . \ No newline at end of file diff --git a/docs/models/document.md b/docs/models/document.md index 284babb9..42c639a6 100644 --- a/docs/models/document.md +++ b/docs/models/document.md @@ -16,6 +16,7 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `process_status` | [models.ProcessStatus](../models/processstatus.md) | :heavy_check_mark: | N/A | | `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A | | `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | | `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 7269c055..71d139cd 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. 
| mistral-embed | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | | `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | | `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | | `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingrequestinputs.md b/docs/models/embeddingrequestinputs.md index 527a089b..a3f82c1c 100644 --- a/docs/models/embeddingrequestinputs.md +++ b/docs/models/embeddingrequestinputs.md @@ -1,6 +1,6 @@ # EmbeddingRequestInputs -The text content to be embedded, can be a string or an array of strings for fast processing in bulk. +Text to embed. ## Supported Types diff --git a/docs/models/processingstatusout.md b/docs/models/processingstatusout.md index 7b67583f..bc40d320 100644 --- a/docs/models/processingstatusout.md +++ b/docs/models/processingstatusout.md @@ -3,7 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------- | ------------------- | ------------------- | ------------------- | -| `document_id` | *str* | :heavy_check_mark: | N/A | -| `processing_status` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `process_status` | [models.ProcessStatus](../models/processstatus.md) | :heavy_check_mark: | N/A | +| `processing_status` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/processstatus.md b/docs/models/processstatus.md new file mode 100644 index 00000000..3a9c004e --- /dev/null +++ b/docs/models/processstatus.md @@ -0,0 +1,15 @@ +# ProcessStatus + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `SELF_MANAGED` | self_managed | +| `MISSING_CONTENT` | missing_content | +| `NOOP` | noop | +| `DONE` | done | +| `TODO` | todo | +| `IN_PROGRESS` | in_progress | +| `ERROR` | error | +| `WAITING_FOR_CAPACITY` | waiting_for_capacity | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index eecb5c9e..cb207d8b 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -38,8 +38,8 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. 
| mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | | `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | diff --git a/pyproject.toml b/pyproject.toml index c42e4260..56d23bb8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0b1" +version = "2.0.0rc1" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index ab2cf01d..805648e4 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -4,10 +4,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0b1" +__version__: str = "2.0.0rc1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py index 5f9d3b9c..5d55ffc4 100644 --- a/src/mistralai/client/embeddings.py +++ b/src/mistralai/client/embeddings.py @@ -33,8 +33,8 @@ def create( Embeddings - :param model: The ID of the model to be used for embedding. - :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param model: ID of the model to use. + :param inputs: Text to embed. :param metadata: :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. :param output_dtype: @@ -143,8 +143,8 @@ async def create_async( Embeddings - :param model: The ID of the model to be used for embedding. - :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param model: ID of the model to use. + :param inputs: Text to embed. :param metadata: :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
:param output_dtype: diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py index 5ef8b3f3..7d2dfd97 100644 --- a/src/mistralai/client/models/__init__.py +++ b/src/mistralai/client/models/__init__.py @@ -734,6 +734,7 @@ from .paginationinfo import PaginationInfo, PaginationInfoTypedDict from .prediction import Prediction, PredictionTypedDict from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .processstatus import ProcessStatus from .realtimetranscriptionerror import ( RealtimeTranscriptionError, RealtimeTranscriptionErrorTypedDict, @@ -1475,6 +1476,7 @@ "PaginationInfoTypedDict", "Prediction", "PredictionTypedDict", + "ProcessStatus", "ProcessingStatusOut", "ProcessingStatusOutTypedDict", "RealtimeTranscriptionError", @@ -2199,6 +2201,7 @@ "PredictionTypedDict": ".prediction", "ProcessingStatusOut": ".processingstatusout", "ProcessingStatusOutTypedDict": ".processingstatusout", + "ProcessStatus": ".processstatus", "RealtimeTranscriptionError": ".realtimetranscriptionerror", "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", diff --git a/src/mistralai/client/models/document.py b/src/mistralai/client/models/document.py index 31eebbd1..fcc5bca5 100644 --- a/src/mistralai/client/models/document.py +++ b/src/mistralai/client/models/document.py @@ -2,6 +2,7 @@ # @generated-id: fbbf7428328c from __future__ import annotations +from .processstatus import ProcessStatus from datetime import datetime from mistralai.client.types import ( BaseModel, @@ -24,6 +25,7 @@ class DocumentTypedDict(TypedDict): size: Nullable[int] name: str created_at: datetime + process_status: ProcessStatus uploaded_by_id: Nullable[str] uploaded_by_type: str processing_status: str @@ -54,6 +56,8 @@ class Document(BaseModel): created_at: datetime + process_status: ProcessStatus + uploaded_by_id: Nullable[str] uploaded_by_type: str diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py index 15950590..5fa2d2f6 100644 --- a/src/mistralai/client/models/embeddingrequest.py +++ b/src/mistralai/client/models/embeddingrequest.py @@ -20,18 +20,18 @@ EmbeddingRequestInputsTypedDict = TypeAliasType( "EmbeddingRequestInputsTypedDict", Union[str, List[str]] ) -r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" +r"""Text to embed.""" EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) -r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" +r"""Text to embed.""" class EmbeddingRequestTypedDict(TypedDict): model: str - r"""The ID of the model to be used for embedding.""" + r"""ID of the model to use.""" inputs: EmbeddingRequestInputsTypedDict - r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + r"""Text to embed.""" metadata: NotRequired[Nullable[Dict[str, Any]]] output_dimension: NotRequired[Nullable[int]] r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" @@ -41,10 +41,10 @@ class EmbeddingRequestTypedDict(TypedDict): class EmbeddingRequest(BaseModel): model: str - r"""The ID of the model to be used for embedding.""" + r"""ID of the model to use.""" inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] - r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + r"""Text to embed.""" metadata: OptionalNullable[Dict[str, Any]] = UNSET diff --git a/src/mistralai/client/models/processingstatusout.py b/src/mistralai/client/models/processingstatusout.py index 3acadcc9..ed2a4f22 100644 --- a/src/mistralai/client/models/processingstatusout.py +++ b/src/mistralai/client/models/processingstatusout.py @@ -2,16 +2,20 @@ # @generated-id: 3df842c4140f from __future__ import annotations +from .processstatus import ProcessStatus from mistralai.client.types import BaseModel from typing_extensions import TypedDict class ProcessingStatusOutTypedDict(TypedDict): document_id: str + process_status: ProcessStatus processing_status: str class ProcessingStatusOut(BaseModel): document_id: str + process_status: ProcessStatus + processing_status: str diff --git a/src/mistralai/client/models/processstatus.py b/src/mistralai/client/models/processstatus.py new file mode 100644 index 00000000..15bdce20 --- /dev/null +++ b/src/mistralai/client/models/processstatus.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0205512146e6 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ProcessStatus = Union[ + Literal[ + "self_managed", + "missing_content", + "noop", + "done", + "todo", + "in_progress", + "error", + "waiting_for_capacity", + ], + UnrecognizedStr, +] diff --git a/uv.lock b/uv.lock index 1a37a7d6..7139deab 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "2.0.0b1" +version = "2.0.0rc1" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, From 40dc89035b7d07e54370beff4b9ecf5f8751525b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 14:11:25 +0100 Subject: [PATCH 41/42] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20-?= =?UTF-8?q?=20Generate=20MISTRAL-PYTHON-SDK-GOOGLE-CLOUD=20MISTRALAI-GCP-S?= =?UTF-8?q?DK=202.0.0rc1=20(#396)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: align GCP pyproject.toml and uv.lock to version 2.0.0rc1 --------- Co-authored-by: speakeasybot Co-authored-by: GitHub Action --- .speakeasy/workflow.lock | 12 ++++++------ packages/gcp/.speakeasy/gen.lock | 16 ++++++++-------- packages/gcp/.speakeasy/gen.yaml | 2 +- packages/gcp/RELEASES.md | 12 +++++++++++- packages/gcp/pyproject.toml | 2 +- .../gcp/src/mistralai/gcp/client/_version.py | 4 ++-- packages/gcp/uv.lock | 2 +- 7 files changed, 30 insertions(+), 20 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 2b919f8b..72bce856 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -9,11 +9,11 @@ sources: - speakeasy-sdk-regen-1772041212 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 - sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 + sourceRevisionDigest: sha256:b2ffad81388f92b1018cb5fe2d409920d795a4b9cd18d8d4147d06b6e53585eb + sourceBlobDigest: sha256:9fbc256371243c39284852f9c44ea59244205fe4265fddf867903d3eb60f17fd tags: - latest - - speakeasy-sdk-regen-1772041030 + - speakeasy-sdk-regen-1772455561 mistral-openapi: sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 @@ -32,10 +32,10 @@ targets: mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 - sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 + sourceRevisionDigest: sha256:b2ffad81388f92b1018cb5fe2d409920d795a4b9cd18d8d4147d06b6e53585eb + sourceBlobDigest: sha256:9fbc256371243c39284852f9c44ea59244205fe4265fddf867903d3eb60f17fd codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:35f30ba8ce4bd70f58b6abc5222d0bbf82eecc3109b09ca99df4406e363e21a0 + codeSamplesRevisionDigest: sha256:79c3a6d89d6c6f01f0400a619531f4f1cf18875754ff488558c337535fd83ce9 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi diff --git a/packages/gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock index 6e33773d..e7932c9e 100644 --- a/packages/gcp/.speakeasy/gen.lock +++ b/packages/gcp/.speakeasy/gen.lock @@ -1,20 +1,20 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: bc4a0ba9c38418d84a6a8a76b503977b + docChecksum: 58bc2dcdd83a2b7c4856971baa20641b docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0b1 - configChecksum: 9cea6a311ff15502c47b0ef87e9846a2 + releaseVersion: 2.0.0rc1 + configChecksum: 5b0554f4a04c849bc6f376bec69654aa repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/gcp 
 installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp
 published: true
 persistentEdits:
-  generation_id: e503bb37-7bdd-4ebf-9bed-a8f754c99f8a
-  pristine_commit_hash: f14b1b1288437b7fc0ba666a384614a225385259
-  pristine_tree_hash: 67e6d0a84ae20666a636dcc8ad174647a96b105f
+  generation_id: 724d8b32-d30d-4743-9e65-0bd450961ed2
+  pristine_commit_hash: 7ef9f2e9ed9ed33b6f502afc01b1354bd0c499d1
+  pristine_tree_hash: 19a1b62b3168a95e58538e2da4215028949b1ba2
 features:
   python:
     additionalDependencies: 1.0.0
@@ -326,8 +326,8 @@ trackedFiles:
     pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489
   src/mistralai/gcp/client/_version.py:
     id: f87319e32c7b
-    last_write_checksum: sha1:0d99fadc73b957112022a95eabeb0e3a98d14ff4
-    pristine_git_object: 36e44a5e6067e8bd197b38cc238686f660c77244
+    last_write_checksum: sha1:05656d6552e7c9cc97c9bbe6483ee906050d28ea
+    pristine_git_object: 7415341f2a4519b074f4b8ffa0f06c5ac2ac45e3
   src/mistralai/gcp/client/basesdk.py:
     id: 4d594572857b
     last_write_checksum: sha1:d8ef9e2f4fa97d402eb9f5472ceb80fb39693991
diff --git a/packages/gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml
index 18f4b4d5..35a47062 100644
--- a/packages/gcp/.speakeasy/gen.yaml
+++ b/packages/gcp/.speakeasy/gen.yaml
@@ -30,7 +30,7 @@ generation:
   generateNewTests: false
   skipResponseBodyAssertions: false
 python:
-  version: 2.0.0b1
+  version: 2.0.0rc1
   additionalDependencies:
     dev:
       pytest: ^8.2.2
diff --git a/packages/gcp/RELEASES.md b/packages/gcp/RELEASES.md
index ec883c62..2261c857 100644
--- a/packages/gcp/RELEASES.md
+++ b/packages/gcp/RELEASES.md
@@ -18,4 +18,14 @@ Based on:
 ### Generated
 - [python v2.0.0b1] packages/gcp
 ### Releases
-- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-gcp/2.0.0b1 - packages/gcp
\ No newline at end of file
+- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-gcp/2.0.0b1 - packages/gcp
+
+## 2026-03-02 12:45:41
+### Changes
+Based on:
+- OpenAPI Doc
+- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy
+### Generated
+- [python v2.0.0rc1] packages/gcp
+### Releases
+- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai-gcp/2.0.0rc1 - packages/gcp
\ No newline at end of file
diff --git a/packages/gcp/pyproject.toml b/packages/gcp/pyproject.toml
index c0497656..e0c4af9a 100644
--- a/packages/gcp/pyproject.toml
+++ b/packages/gcp/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "mistralai-gcp"
-version = "2.0.0b1"
+version = "2.0.0rc1"
 description = "Python Client SDK for the Mistral AI API in GCP."
 authors = [{ name = "Mistral" }]
 requires-python = ">=3.10"
diff --git a/packages/gcp/src/mistralai/gcp/client/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py
index 36e44a5e..7415341f 100644
--- a/packages/gcp/src/mistralai/gcp/client/_version.py
+++ b/packages/gcp/src/mistralai/gcp/client/_version.py
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "mistralai-gcp"
-__version__: str = "2.0.0b1"
+__version__: str = "2.0.0rc1"
 __openapi_doc_version__: str = "1.0.0"
 __gen_version__: str = "2.841.0"
-__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-gcp"
+__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai-gcp"
 
 try:
     if __package__ is not None:
diff --git a/packages/gcp/uv.lock b/packages/gcp/uv.lock
index 9bd9f9b6..0c978164 100644
--- a/packages/gcp/uv.lock
+++ b/packages/gcp/uv.lock
@@ -277,7 +277,7 @@ wheels = [
 
 [[package]]
 name = "mistralai-gcp"
-version = "2.0.0b1"
+version = "2.0.0rc1"
 source = { editable = "." }
 dependencies = [
     { name = "eval-type-backport" },

From d6f19949c8cc57cbffb36c19067c94a7769b93b5 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Mon, 2 Mar 2026 14:41:02 +0100
Subject: [PATCH 42/42] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20-?=
 =?UTF-8?q?=20Generate=20MISTRAL-PYTHON-SDK-AZURE=20MISTRALAI-AZURE-SDK=20?=
 =?UTF-8?q?2.0.0rc1=20(#397)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0

* chore: align Azure pyproject.toml and uv.lock to version 2.0.0rc1

---------

Co-authored-by: speakeasybot
Co-authored-by: GitHub Action
---
 .speakeasy/workflow.lock                          | 12 ++++++------
 packages/azure/.speakeasy/gen.lock                | 16 ++++++++--------
 packages/azure/.speakeasy/gen.yaml                |  2 +-
 packages/azure/RELEASES.md                        | 12 +++++++++++-
 packages/azure/pyproject.toml                     |  2 +-
 .../azure/src/mistralai/azure/client/_version.py  |  4 ++--
 packages/azure/uv.lock                            |  2 +-
 7 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock
index 72bce856..0e0db8ba 100644
--- a/.speakeasy/workflow.lock
+++ b/.speakeasy/workflow.lock
@@ -2,11 +2,11 @@ speakeasyVersion: 1.729.0
 sources:
     mistral-azure-source:
         sourceNamespace: mistral-openapi-azure
-        sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7
-        sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e
+        sourceRevisionDigest: sha256:0ac44fcf0cc7e7b8e101cb781a3699d5ccd37e0d80a5583c582ac7f0396803ac
+        sourceBlobDigest: sha256:4707ee1cbefa98516d39020e9cb29d7593fbd101a7d5c9c5fa50c44da3d4dce6
         tags:
            - latest
-           - speakeasy-sdk-regen-1772041212
+           - speakeasy-sdk-regen-1772457322
     mistral-google-cloud-source:
         sourceNamespace: mistral-openapi-google-cloud
         sourceRevisionDigest: sha256:b2ffad81388f92b1018cb5fe2d409920d795a4b9cd18d8d4147d06b6e53585eb
@@ -25,10 +25,10 @@ targets:
     mistralai-azure-sdk:
         source: mistral-azure-source
         sourceNamespace: mistral-openapi-azure
-        sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7
-        sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e
+        sourceRevisionDigest: sha256:0ac44fcf0cc7e7b8e101cb781a3699d5ccd37e0d80a5583c582ac7f0396803ac
+        sourceBlobDigest: sha256:4707ee1cbefa98516d39020e9cb29d7593fbd101a7d5c9c5fa50c44da3d4dce6
         codeSamplesNamespace: mistral-openapi-azure-code-samples
-        codeSamplesRevisionDigest: sha256:68866aada6ad13253e32dab06e4876a7aeba4d7759683d81b2ba27f0fb55a342
+        codeSamplesRevisionDigest: sha256:67fa6ead333ffe784141b51b231bca05411189fd92f2e5fe3b9f861b97db5942
     mistralai-gcp-sdk:
         source: mistral-google-cloud-source
         sourceNamespace: mistral-openapi-google-cloud
diff --git a/packages/azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock
index 5da824d1..ad85f076 100644
--- a/packages/azure/.speakeasy/gen.lock
+++ b/packages/azure/.speakeasy/gen.lock
@@ -1,20 +1,20 @@
 lockVersion: 2.0.0
 id: dc40fa48-2c4d-46ad-ac8b-270749770f34
 management:
-  docChecksum: 571037b8485712afcef86703debb7f15
+  docChecksum: be10e9d3b72d49632e8d6d311b9b8193
   docVersion: 1.0.0
   speakeasyVersion: 1.729.0
   generationVersion: 2.841.0
-  releaseVersion: 2.0.0b1
-  configChecksum: 01160bf17a4abd1ce038528d20cd4685
+  releaseVersion: 2.0.0rc1
+  configChecksum: b06d8b151630e956c8b778cdc6c54c06
   repoURL: https://github.com/mistralai/client-python.git
   repoSubDirectory: packages/azure
   installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure
   published: true
 persistentEdits:
-  generation_id: 2f5b7e40-9bd2-4c96-9e97-16a92e4b44af
-  pristine_commit_hash: 480a8b0e23da7e4752e6ad5b36fc72651e09d2d7
-  pristine_tree_hash: 8a4c9b9a253fbe496a52e0496fa7e58e91e32c7c
+  generation_id: f76a86de-7619-48b9-8987-e2ecf1378ceb
+  pristine_commit_hash: 8b77e997fc0b0da49806cf251be6f273cf32cdfb
+  pristine_tree_hash: 2ff5921bc10f855310c77650bdc4622a57fc63fb
 features:
   python:
     additionalDependencies: 1.0.0
@@ -354,8 +354,8 @@ trackedFiles:
     pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9
   src/mistralai/azure/client/_version.py:
     id: a77160e60e5d
-    last_write_checksum: sha1:1b76e9448049c69dbdb690b9de25456378bba0a7
-    pristine_git_object: 213648be87a19e24d87160c1286614b2d5df7344
+    last_write_checksum: sha1:79494d1fb83ebffd4ad7b285299d4fea4e5a4e92
+    pristine_git_object: 4d15bdc8f877a1616c6d1f978037f76e0487bd6c
   src/mistralai/azure/client/basesdk.py:
     id: 5a585a95ec21
     last_write_checksum: sha1:0c2e686aa42d6aeeb103193aa058d6ddff7bcf74
diff --git a/packages/azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml
index 55934cc8..518e1e25 100644
--- a/packages/azure/.speakeasy/gen.yaml
+++ b/packages/azure/.speakeasy/gen.yaml
@@ -30,7 +30,7 @@ generation:
   generateNewTests: false
   skipResponseBodyAssertions: false
 python:
-  version: 2.0.0b1
+  version: 2.0.0rc1
   additionalDependencies:
     dev:
       pytest: ^8.2.2
diff --git a/packages/azure/RELEASES.md b/packages/azure/RELEASES.md
index e625ee98..2090c67a 100644
--- a/packages/azure/RELEASES.md
+++ b/packages/azure/RELEASES.md
@@ -18,4 +18,14 @@ Based on:
 ### Generated
 - [python v2.0.0b1] packages/azure
 ### Releases
-- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-azure/2.0.0b1 - packages/azure
\ No newline at end of file
+- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-azure/2.0.0b1 - packages/azure
+
+## 2026-03-02 13:15:00
+### Changes
+Based on:
+- OpenAPI Doc
+- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy
+### Generated
+- [python v2.0.0rc1] packages/azure
+### Releases
+- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai-azure/2.0.0rc1 - packages/azure
\ No newline at end of file
diff --git a/packages/azure/pyproject.toml b/packages/azure/pyproject.toml
index cf80bde8..000dd558 100644
--- a/packages/azure/pyproject.toml
+++ b/packages/azure/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "mistralai-azure"
-version = "2.0.0b1"
+version = "2.0.0rc1"
 description = "Python Client SDK for the Mistral AI API in Azure."
 authors = [{ name = "Mistral" }]
 requires-python = ">=3.10"
diff --git a/packages/azure/src/mistralai/azure/client/_version.py b/packages/azure/src/mistralai/azure/client/_version.py
index 213648be..4d15bdc8 100644
--- a/packages/azure/src/mistralai/azure/client/_version.py
+++ b/packages/azure/src/mistralai/azure/client/_version.py
@@ -3,10 +3,10 @@
 import importlib.metadata
 
 __title__: str = "mistralai-azure"
-__version__: str = "2.0.0b1"
+__version__: str = "2.0.0rc1"
 __openapi_doc_version__: str = "1.0.0"
 __gen_version__: str = "2.841.0"
-__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-azure"
+__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai-azure"
 
 try:
     if __package__ is not None:
diff --git a/packages/azure/uv.lock b/packages/azure/uv.lock
index 7c090c00..df3719ff 100644
--- a/packages/azure/uv.lock
+++ b/packages/azure/uv.lock
@@ -154,7 +154,7 @@ wheels = [
 
 [[package]]
 name = "mistralai-azure"
-version = "2.0.0b1"
+version = "2.0.0rc1"
 source = { editable = "." }
 dependencies = [
     { name = "httpcore" },